diff --git a/.ansible-lint b/.ansible-lint
new file mode 100644
index 00000000..101357cb
--- /dev/null
+++ b/.ansible-lint
@@ -0,0 +1,23 @@
+---
+skip_list:
+  - yaml[line-length]
+  - name[casing]
+  - yaml[comments]
+quiet: true
+exclude_paths:
+  - .cache/  # implicit unless exclude_paths is defined in config
+  - collections/
+  - roles/PyratLabs.k3s
+  - roles/gantsign.ctop
+  - roles/geerlingguy.ansible
+  - roles/geerlingguy.docker
+  - roles/geerlingguy.helm
+  - roles/geerlingguy.nfs_server
+  - roles/geerlingguy.pip
+  - roles/hifis-net.unattended_upgrades
+  - roles/l3d.gitea
+  - roles/mrlesmithjr.ansible-manage-lvm
+  - roles/oefenweb.ufw
+  - roles/pandemonium1986.ansible-role-k9s
+  - roles/robertdebock.bootstrap
+  - .gitlab-ci.yml
diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore
new file mode 100644
index 00000000..2b18e9e6
--- /dev/null
+++ b/.ansible-lint-ignore
@@ -0,0 +1,3 @@
+# This file contains ignored rule violations for ansible-lint
+playbooks/on-off/remove_old_ssh_key.yml name[play]
+playbooks/on-off/remove_old_ssh_key.yml yaml[truthy]
diff --git a/.drone.yml b/.drone.yml
index 491ad1d4..dab97558 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -19,15 +19,18 @@ type: docker
 name: ansible-lint
 steps:
 - name: ansible-lint
-  image: cytopia/ansible-lint
+  image: quay.io/ansible/creator-ee
   commands:
-  - /usr/bin/ansible-lint *.*
+  - ansible-lint --version
+  - echo $ANSIBLE_VAULT_PASSWORD > ./vault-pass.yml
+  - ansible-lint
   when:
     event:
       exclude:
      - tag
-depends_on:
-  - gitleaks
+  environment:
+    ANSIBLE_VAULT_PASSWORD:
+      from_secret: vault-pass
 
 ---
 kind: pipeline
@@ -42,5 +45,3 @@ steps:
     event:
       exclude:
       - tag
-depends_on:
-  - gitleaks
diff --git a/.gitignore b/.gitignore
index e70d0af2..845533c2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,10 +1,5 @@
 .git/
 vault-pass.yml
 id_rsa_ansible_user
-id_rsa_ansible_user_pub
-id_rsa_ansible_user.pub
-plugins/lookup/__pycache__/**
-plugins/callback/__pycache__/
-trace/**json
 id_ed25519
 id_ed25519.pub
diff --git a/ansible.cfg b/ansible.cfg
index aaa758e4..ec0906a8 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -4,30 +4,9 @@ nocows = 1
 retry_files_enabled = False
 roles_path = ./roles
 lookup_plugins = ./plugins/lookup
-collections_paths = ./ansible_collections
+collections_paths = ./collections
 private_key_file = ./id_ed25519
 vault_password_file = vault-pass.yml
 gathering = smart
-#display_ok_hosts = no  # only show changed and failed tasks/hosts
-#display_skipped_hosts = yes  # ditto
-# callback_plugins = ./plugins/callback
-# python3 -m ara.setup.callback_plugins
-# callbacks_enabled = mhansen.ansible_trace.trace  # https://github.com/mhansen/ansible-trace
-[inventory]
-
-[privilege_escalation]
-
-[paramiko_connection]
-
-[ssh_connection]
-
-[persistent_connection]
-
-[accelerate]
-
-[selinux]
-
-[colors]
-
 [diff]
 always = true
diff --git a/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml b/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml
deleted file mode 100644
index 99767c6d..00000000
--- a/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml
+++ /dev/null
@@ -1,491 +0,0 @@
-trigger:
-  batch: true
-  branches:
-    include:
-      - main
-      - stable-*
-
-pr:
-  autoCancel: true
-  branches:
-    include:
-      - main
-      - stable-*
-
-schedules:
-  - cron: 0 8 * * *
-    displayName: Nightly (main)
-    always: true
-    branches:
-      include:
-        - main
-  - cron: 0 10 * * *
-    displayName: Nightly (active stable branches)
-    always: true
-    branches:
-      include:
-        - stable-3
-        - 
stable-4 - - cron: 0 11 * * 0 - displayName: Weekly (old stable branches) - always: true - branches: - include: - - stable-1 - - stable-2 - -variables: - - name: checkoutPath - value: ansible_collections/community/general - - name: coverageBranches - value: main - - name: pipelinesCoverage - value: coverage - - name: entryPoint - value: tests/utils/shippable/shippable.sh - - name: fetchDepth - value: 0 - -resources: - containers: - - container: default - image: quay.io/ansible/azure-pipelines-test-container:1.9.0 - -pool: Standard - -stages: -### Sanity - - stage: Sanity_devel - displayName: Sanity devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: devel/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 - - test: extra - - stage: Sanity_2_12 - displayName: Sanity 2.12 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.12/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 - - stage: Sanity_2_11 - displayName: Sanity 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.11/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 - - stage: Sanity_2_10 - displayName: Sanity 2.10 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.10/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 - - stage: Sanity_2_9 - displayName: Sanity 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.9/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 -### Units - - stage: Units_devel - displayName: Units devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: devel/units/{0}/1 - targets: - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - - test: 3.9 - - test: '3.10' - - stage: Units_2_12 - displayName: Units 2.12 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.12/units/{0}/1 - targets: - - test: 2.6 - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - - test: '3.10' - - stage: Units_2_11 - displayName: Units 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.11/units/{0}/1 - targets: - - test: 2.6 - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - - test: 3.9 - - stage: Units_2_10 - displayName: Units 2.10 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.10/units/{0}/1 - targets: - - test: 2.7 - - test: 3.6 - - stage: Units_2_9 - displayName: Units 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.9/units/{0}/1 - targets: - - test: 2.6 - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - -## Remote - - stage: Remote_devel - displayName: Remote devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: devel/{0} - targets: - - name: macOS 12.0 - test: macos/12.0 - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 8.5 - test: rhel/8.5 - - name: FreeBSD 12.3 - test: freebsd/12.3 - - name: FreeBSD 13.0 - test: freebsd/13.0 - groups: - - 1 - - 2 - - 3 - - stage: Remote_2_12 - displayName: 
Remote 2.12 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.12/{0} - targets: - - name: macOS 11.1 - test: macos/11.1 - - name: RHEL 8.4 - test: rhel/8.4 - - name: FreeBSD 13.0 - test: freebsd/13.0 - groups: - - 1 - - 2 - - stage: Remote_2_11 - displayName: Remote 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.11/{0} - targets: - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 8.3 - test: rhel/8.3 - - name: FreeBSD 12.2 - test: freebsd/12.2 - groups: - - 1 - - 2 - - stage: Remote_2_10 - displayName: Remote 2.10 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.10/{0} - targets: - - name: OS X 10.11 - test: osx/10.11 - - name: macOS 10.15 - test: macos/10.15 - groups: - - 1 - - 2 - - stage: Remote_2_9 - displayName: Remote 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.9/{0} - targets: - - name: RHEL 8.2 - test: rhel/8.2 - - name: RHEL 7.8 - test: rhel/7.8 - - name: FreeBSD 12.0 - test: freebsd/12.0 - groups: - - 1 - - 2 - -### Docker - - stage: Docker_devel - displayName: Docker devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: devel/linux/{0} - targets: - - name: CentOS 7 - test: centos7 - - name: Fedora 34 - test: fedora34 - - name: Fedora 35 - test: fedora35 - - name: openSUSE 15 py2 - test: opensuse15py2 - - name: openSUSE 15 py3 - test: opensuse15 - - name: Ubuntu 18.04 - test: ubuntu1804 - - name: Ubuntu 20.04 - test: ubuntu2004 - - name: Alpine 3 - test: alpine3 - groups: - - 1 - - 2 - - 3 - - stage: Docker_2_12 - displayName: Docker 2.12 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.12/linux/{0} - targets: - - name: CentOS 6 - test: centos6 - - name: Fedora 34 - test: fedora34 - - name: openSUSE 15 py3 - test: opensuse15 - - name: Ubuntu 20.04 - test: ubuntu2004 - groups: - - 1 - - 2 - - 3 - - stage: Docker_2_11 - displayName: Docker 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.11/linux/{0} - targets: - - name: CentOS 7 - test: centos7 - - name: Fedora 33 - test: fedora33 - - name: openSUSE 15 py2 - test: opensuse15py2 - - name: Alpine 3 - test: alpine3 - groups: - - 2 - - 3 - - stage: Docker_2_10 - displayName: Docker 2.10 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.10/linux/{0} - targets: - - name: Fedora 32 - test: fedora32 - - name: Ubuntu 16.04 - test: ubuntu1604 - groups: - - 2 - - 3 - - stage: Docker_2_9 - displayName: Docker 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.9/linux/{0} - targets: - - name: Fedora 31 - test: fedora31 - - name: openSUSE 15 py3 - test: opensuse15 - groups: - - 2 - - 3 - -### Community Docker - - stage: Docker_community_devel - displayName: Docker (community images) devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: devel/linux-community/{0} - targets: - - name: Debian Bullseye - test: debian-bullseye/3.9 - - name: ArchLinux - test: archlinux/3.10 - - name: CentOS Stream 8 - test: centos-stream8/3.8 - groups: - - 1 - - 2 - - 3 - -### Cloud - - stage: Cloud_devel - displayName: Cloud devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: devel/cloud/{0}/1 - targets: - - test: 2.7 - - test: 3.9 - - stage: Cloud_2_12 - displayName: Cloud 2.12 - dependsOn: 
[] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.12/cloud/{0}/1 - targets: - - test: 3.8 - - stage: Cloud_2_11 - displayName: Cloud 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.11/cloud/{0}/1 - targets: - - test: 3.6 - - stage: Cloud_2_10 - displayName: Cloud 2.10 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.10/cloud/{0}/1 - targets: - - test: 3.5 - - stage: Cloud_2_9 - displayName: Cloud 2.9 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.9/cloud/{0}/1 - targets: - - test: 2.7 - - stage: Summary - condition: succeededOrFailed() - dependsOn: - - Sanity_devel - - Sanity_2_9 - - Sanity_2_10 - - Sanity_2_11 - - Sanity_2_12 - - Units_devel - - Units_2_9 - - Units_2_10 - - Units_2_11 - - Units_2_12 - - Remote_devel - - Remote_2_9 - - Remote_2_10 - - Remote_2_11 - - Remote_2_12 - - Docker_devel - - Docker_2_9 - - Docker_2_10 - - Docker_2_11 - - Docker_2_12 - - Docker_community_devel - - Cloud_devel - - Cloud_2_9 - - Cloud_2_10 - - Cloud_2_11 - - Cloud_2_12 - jobs: - - template: templates/coverage.yml diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh b/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh deleted file mode 100644 index 1ccfcf20..00000000 --- a/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -# Aggregate code coverage results for later processing. - -set -o pipefail -eu - -agent_temp_directory="$1" - -PATH="${PWD}/bin:${PATH}" - -mkdir "${agent_temp_directory}/coverage/" - -options=(--venv --venv-system-site-packages --color -v) - -ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}" - -if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then - # Only analyze coverage if the installed version of ansible-test supports it. - # Doing so allows this script to work unmodified for multiple Ansible versions. - ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}" -fi diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh b/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh deleted file mode 100644 index c039f7dc..00000000 --- a/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -# Generate code coverage reports for uploading to Azure Pipelines and codecov.io. - -set -o pipefail -eu - -PATH="${PWD}/bin:${PATH}" - -if ! ansible-test --help >/dev/null 2>&1; then - # Install the devel version of ansible-test for generating code coverage reports. - # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs). - # Since a version of ansible-test is required that can work the output from multiple older releases, the devel version is used. 
-    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
-fi
-
-ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
diff --git a/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml b/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml
deleted file mode 100644
index 1b36ea45..00000000
--- a/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# This template adds a job for processing code coverage data.
-# It will upload results to Azure Pipelines and codecov.io.
-# Use it from a job stage that completes after all other jobs have completed.
-# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.
-
-jobs:
-  - job: Coverage
-    displayName: Code Coverage
-    container: default
-    workspace:
-      clean: all
-    steps:
-      - checkout: self
-        fetchDepth: $(fetchDepth)
-        path: $(checkoutPath)
-      - task: DownloadPipelineArtifact@2
-        displayName: Download Coverage Data
-        inputs:
-          path: coverage/
-          patterns: "Coverage */*=coverage.combined"
-      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
-        displayName: Combine Coverage Data
-      - bash: .azure-pipelines/scripts/report-coverage.sh
-        displayName: Generate Coverage Report
-        condition: gt(variables.coverageFileCount, 0)
-      - task: PublishCodeCoverageResults@1
-        inputs:
-          codeCoverageTool: Cobertura
-          # Azure Pipelines only accepts a single coverage data file.
-          # That means only Python or PowerShell coverage can be uploaded, but not both.
-          # Set the "pipelinesCoverage" variable to determine which type is uploaded.
-          # Use "coverage" for Python and "coverage-powershell" for PowerShell.
- summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml" - displayName: Publish to Azure Pipelines - condition: gt(variables.coverageFileCount, 0) - - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)" - displayName: Publish to codecov.io - condition: gt(variables.coverageFileCount, 0) - continueOnError: true diff --git a/ansible_collections/community/general/.github/BOTMETA.yml b/ansible_collections/community/general/.github/BOTMETA.yml deleted file mode 100644 index 00578f5e..00000000 --- a/ansible_collections/community/general/.github/BOTMETA.yml +++ /dev/null @@ -1,1259 +0,0 @@ -notifications: true -automerge: true -files: - plugins/: - supershipit: quidame - changelogs/: {} - changelogs/fragments/: - support: community - $actions: - labels: action - $actions/system/iptables_state.py: - maintainers: quidame - $actions/system/shutdown.py: - maintainers: nitzmahone samdoran aminvakil - $becomes/: - labels: become - $becomes/doas.py: - maintainers: $team_ansible_core - $becomes/dzdo.py: - maintainers: $team_ansible_core - $becomes/ksu.py: - maintainers: $team_ansible_core - $becomes/machinectl.py: - maintainers: $team_ansible_core - $becomes/pbrun.py: - maintainers: $team_ansible_core - $becomes/pfexec.py: - maintainers: $team_ansible_core - $becomes/pmrun.py: - maintainers: $team_ansible_core - $becomes/sesu.py: - maintainers: nekonyuu - $becomes/sudosu.py: - maintainers: dagwieers - $caches/: - labels: cache - $caches/memcached.py: {} - $caches/pickle.py: - maintainers: bcoca - $caches/redis.py: {} - $caches/yaml.py: - maintainers: bcoca - $callbacks/: - labels: callbacks - $callbacks/cgroup_memory_recap.py: {} - $callbacks/context_demo.py: {} - $callbacks/counter_enabled.py: {} - $callbacks/dense.py: - maintainers: dagwieers - $callbacks/diy.py: - maintainers: theque5t - $callbacks/elastic.py: - maintainers: v1v - keywords: apm observability - $callbacks/hipchat.py: {} - $callbacks/jabber.py: {} - $callbacks/loganalytics.py: - maintainers: zhcli - $callbacks/logdna.py: {} - $callbacks/logentries.py: {} - $callbacks/log_plays.py: {} - $callbacks/logstash.py: - maintainers: ujenmr - $callbacks/mail.py: - maintainers: dagwieers - $callbacks/nrdp.py: - maintainers: rverchere - $callbacks/null.py: {} - $callbacks/opentelemetry.py: - maintainers: v1v - keywords: opentelemetry observability - $callbacks/say.py: - notify: chris-short - maintainers: $team_macos - labels: macos say - keywords: brew cask darwin homebrew macosx macports osx - $callbacks/selective.py: {} - $callbacks/slack.py: {} - $callbacks/splunk.py: {} - $callbacks/sumologic.py: - maintainers: ryancurrah - labels: sumologic - $callbacks/syslog_json.py: - maintainers: imjoseangel - $callbacks/unixy.py: - maintainers: akatch - labels: unixy - $callbacks/yaml.py: {} - $connections/: - labels: connections - $connections/chroot.py: {} - $connections/funcd.py: - maintainers: mscherer - $connections/iocage.py: {} - $connections/jail.py: - maintainers: $team_ansible_core - $connections/lxc.py: {} - $connections/lxd.py: - maintainers: mattclay - labels: lxd - $connections/qubes.py: - maintainers: kushaldas - $connections/saltstack.py: - maintainers: mscherer - labels: saltstack - $connections/zone.py: - maintainers: $team_ansible_core - $doc_fragments/: - labels: docs_fragments - $doc_fragments/hpe3par.py: - maintainers: farhan7500 gautamphegde - labels: hpe3par - $doc_fragments/hwc.py: - maintainers: $team_huawei - labels: hwc - $doc_fragments/nomad.py: - maintainers: chris93111 - 
$doc_fragments/xenserver.py: - maintainers: bvitnik - labels: xenserver - $filters/counter.py: - maintainers: keilr - $filters/dict.py: - maintainers: felixfontein - $filters/dict_kv.py: - maintainers: giner - $filters/from_csv.py: - maintainers: Ajpantuso - $filters/groupby: - maintainers: felixfontein - $filters/hashids: - maintainers: Ajpantuso - $filters/jc.py: - maintainers: kellyjonbrazil - $filters/json_query.py: {} - $filters/list.py: - maintainers: vbotka - $filters/path_join_shim.py: - maintainers: felixfontein - $filters/random_mac.py: {} - $filters/time.py: - maintainers: resmo - $filters/unicode_normalize.py: - maintainers: Ajpantuso - $filters/version_sort.py: - maintainers: ericzolf - $inventories/: - labels: inventories - $inventories/cobbler.py: - maintainers: opoplawski - $inventories/gitlab_runners.py: - maintainers: morph027 - $inventories/linode.py: - maintainers: $team_linode - labels: cloud linode - keywords: linode dynamic inventory script - $inventories/lxd.py: - maintainers: conloos - $inventories/nmap.py: {} - $inventories/online.py: - maintainers: remyleone - $inventories/opennebula.py: - maintainers: feldsam - labels: cloud opennebula - keywords: opennebula dynamic inventory script - $inventories/proxmox.py: - maintainers: $team_virt ilijamt - $inventories/xen_orchestra.py: - maintainers: ddelnano shinuza - $inventories/icinga2.py: - maintainers: BongoEADGC6 - $inventories/scaleway.py: - maintainers: $team_scaleway - labels: cloud scaleway - $inventories/stackpath_compute.py: - maintainers: shayrybak - $inventories/virtualbox.py: {} - $lookups/: - labels: lookups - $lookups/cartesian.py: {} - $lookups/chef_databag.py: {} - $lookups/collection_version.py: - maintainers: felixfontein - $lookups/consul_kv.py: {} - $lookups/credstash.py: {} - $lookups/cyberarkpassword.py: - notify: cyberark-bizdev - labels: cyberarkpassword - $lookups/dependent.py: - maintainers: felixfontein - $lookups/dig.py: - maintainers: jpmens - labels: dig - $lookups/dnstxt.py: - maintainers: jpmens - $lookups/dsv.py: - maintainers: amigus endlesstrax - $lookups/etcd3.py: - maintainers: eric-belhomme - $lookups/etcd.py: - maintainers: jpmens - $lookups/filetree.py: - maintainers: dagwieers - $lookups/flattened.py: {} - $lookups/hiera.py: - maintainers: jparrill - $lookups/keyring.py: {} - $lookups/lastpass.py: {} - $lookups/lmdb_kv.py: - maintainers: jpmens - $lookups/manifold.py: - maintainers: galanoff - labels: manifold - $lookups/onepass: - maintainers: samdoran - labels: onepassword - $lookups/onepassword.py: - maintainers: azenk scottsb - $lookups/onepassword_raw.py: - maintainers: azenk scottsb - $lookups/passwordstore.py: {} - $lookups/random_pet.py: - maintainers: Akasurde - $lookups/random_string.py: - maintainers: Akasurde - $lookups/random_words.py: - maintainers: konstruktoid - $lookups/redis.py: - maintainers: $team_ansible_core jpmens - $lookups/revbitspss.py: - maintainers: RevBits - $lookups/shelvefile.py: {} - $lookups/tss.py: - maintainers: amigus endlesstrax - $module_utils/: - labels: module_utils - $module_utils/gitlab.py: - notify: jlozadad - maintainers: $team_gitlab - labels: gitlab - keywords: gitlab source_control - $module_utils/hwc_utils.py: - maintainers: $team_huawei - labels: huawei hwc_utils networking - keywords: cloud huawei hwc - $module_utils/identity/keycloak/keycloak.py: - maintainers: $team_keycloak - $module_utils/ipa.py: - maintainers: $team_ipa - labels: ipa - $module_utils/manageiq.py: - maintainers: $team_manageiq - labels: manageiq - 
$module_utils/memset.py: - maintainers: glitchcrab - labels: cloud memset - $module_utils/mh/: - maintainers: russoz - labels: module_helper - $module_utils/module_helper.py: - maintainers: russoz - labels: module_helper - $module_utils/oracle/oci_utils.py: - maintainers: $team_oracle - labels: cloud - $module_utils/pure.py: - maintainers: $team_purestorage - labels: pure pure_storage - $module_utils/redfish_utils.py: - maintainers: $team_redfish - labels: redfish_utils - $module_utils/remote_management/lxca/common.py: - maintainers: navalkp prabhosa - $module_utils/scaleway.py: - maintainers: $team_scaleway - labels: cloud scaleway - $module_utils/storage/hpe3par/hpe3par.py: - maintainers: farhan7500 gautamphegde - $module_utils/utm_utils.py: - maintainers: $team_e_spirit - labels: utm_utils - $module_utils/xenserver.py: - maintainers: bvitnik - labels: xenserver - $modules/cloud/alicloud/: - maintainers: xiaozhu36 - $modules/cloud/atomic/atomic_container.py: - maintainers: giuseppe krsacme - $modules/cloud/atomic/: - maintainers: krsacme - $modules/cloud/centurylink/: - maintainers: clc-runner - $modules/cloud/dimensiondata/dimensiondata_network.py: - maintainers: aimonb tintoy - labels: dimensiondata_network - $modules/cloud/dimensiondata/dimensiondata_vlan.py: - maintainers: tintoy - $modules/cloud/heroku/heroku_collaborator.py: - maintainers: marns93 - $modules/cloud/huawei/: - maintainers: $team_huawei huaweicloud - keywords: cloud huawei hwc - $modules/cloud/linode/: - maintainers: $team_linode - $modules/cloud/linode/linode.py: - maintainers: zbal - $modules/cloud/lxc/lxc_container.py: - maintainers: cloudnull - $modules/cloud/lxd/: - ignore: hnakamur - $modules/cloud/lxd/lxd_profile.py: - maintainers: conloos - $modules/cloud/memset/: - maintainers: glitchcrab - $modules/cloud/misc/cloud_init_data_facts.py: - maintainers: resmo - $modules/cloud/misc/proxmox: - maintainers: $team_virt - labels: proxmox virt - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox.py: - maintainers: UnderGreen - ignore: skvidal - $modules/cloud/misc/proxmox_kvm.py: - maintainers: helldorado - ignore: skvidal - $modules/cloud/misc/proxmox_nic.py: - maintainers: Kogelvis - $modules/cloud/misc/proxmox_tasks_info: - maintainers: paginabianca - $modules/cloud/misc/proxmox_template.py: - maintainers: UnderGreen - ignore: skvidal - $modules/cloud/misc/rhevm.py: - maintainers: $team_virt TimothyVandenbrande - labels: rhevm virt - ignore: skvidal - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/: - ignore: ryansb - $modules/cloud/misc/terraform.py: - maintainers: m-yosefpor rainerleber - $modules/cloud/misc/xenserver_facts.py: - maintainers: caphrim007 cheese - labels: xenserver_facts - ignore: andyhky - $modules/cloud/oneandone/: - maintainers: aajdinov edevenport - $modules/cloud/online/: - maintainers: remyleone - $modules/cloud/opennebula/: - maintainers: $team_opennebula - $modules/cloud/opennebula/one_host.py: - maintainers: rvalle - $modules/cloud/oracle/oci_vcn.py: - maintainers: $team_oracle rohitChaware - $modules/cloud/ovh/: - maintainers: pascalheraud - $modules/cloud/ovh/ovh_monthly_billing.py: - maintainers: fraff - $modules/cloud/packet/packet_device.py: - maintainers: baldwinSPC t0mk teebes - $modules/cloud/packet/: - maintainers: nurfet-becirevic t0mk - $modules/cloud/packet/packet_sshkey.py: - maintainers: t0mk - $modules/cloud/profitbricks/: - maintainers: baldwinSPC - $modules/cloud/pubnub/pubnub_blocks.py: - maintainers: parfeon pubnub - 
$modules/cloud/rackspace/rax.py: - maintainers: omgjlk sivel - $modules/cloud/rackspace/: - ignore: ryansb sivel - $modules/cloud/rackspace/rax_cbs.py: - maintainers: claco - $modules/cloud/rackspace/rax_cbs_attachments.py: - maintainers: claco - $modules/cloud/rackspace/rax_cdb.py: - maintainers: jails - $modules/cloud/rackspace/rax_cdb_user.py: - maintainers: jails - $modules/cloud/rackspace/rax_cdb_database.py: - maintainers: jails - $modules/cloud/rackspace/rax_clb.py: - maintainers: claco - $modules/cloud/rackspace/rax_clb_nodes.py: - maintainers: neuroid - $modules/cloud/rackspace/rax_clb_ssl.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_files.py: - maintainers: angstwad - $modules/cloud/rackspace/rax_files_objects.py: - maintainers: angstwad - $modules/cloud/rackspace/rax_identity.py: - maintainers: claco - $modules/cloud/rackspace/rax_network.py: - maintainers: claco omgjlk - $modules/cloud/rackspace/rax_mon_alarm.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_check.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_entity.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_notification.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_notification_plan.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_queue.py: - maintainers: claco - $modules/cloud/scaleway/: - maintainers: $team_scaleway - $modules/cloud/scaleway/scaleway_database_backup.py: - maintainers: guillaume_ro_fr - $modules/cloud/scaleway/scaleway_image_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_ip_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_organization_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_private_network.py: - maintainers: pastral - $modules/cloud/scaleway/scaleway_security_group.py: - maintainers: DenBeke - $modules/cloud/scaleway/scaleway_security_group_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_security_group_rule.py: - maintainers: DenBeke - $modules/cloud/scaleway/scaleway_server_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_snapshot_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_volume.py: - labels: scaleway_volume - ignore: hekonsek - $modules/cloud/scaleway/scaleway_volume_info.py: - maintainers: Spredzy - $modules/cloud/smartos/: - maintainers: $team_solaris - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/cloud/smartos/nictagadm.py: - maintainers: SmithX10 - $modules/cloud/softlayer/sl_vm.py: - maintainers: mcltn - $modules/cloud/spotinst/spotinst_aws_elastigroup.py: - maintainers: talzur - $modules/cloud/univention/: - maintainers: keachi - $modules/cloud/webfaction/: - maintainers: quentinsf - $modules/cloud/xenserver/: - maintainers: bvitnik - $modules/clustering/consul/: - maintainers: $team_consul - ignore: colin-nolan - $modules/clustering/etcd3.py: - maintainers: evrardjp - ignore: vfauth - $modules/clustering/nomad/: - maintainers: chris93111 - $modules/clustering/pacemaker_cluster.py: - maintainers: matbu - $modules/clustering/znode.py: - maintainers: treyperry - $modules/database/aerospike/aerospike_migrations.py: - maintainers: Alb0t - $modules/database/influxdb/: - maintainers: kamsz - $modules/database/influxdb/influxdb_query.py: - maintainers: resmo - $modules/database/influxdb/influxdb_user.py: - maintainers: zhhuta - $modules/database/influxdb/influxdb_write.py: - maintainers: 
resmo - $modules/database/misc/elasticsearch_plugin.py: - maintainers: ThePixelDeveloper samdoran - $modules/database/misc/kibana_plugin.py: - maintainers: barryib - $modules/database/misc/odbc.py: - maintainers: john-westcott-iv - $modules/database/misc/redis.py: - maintainers: slok - $modules/database/misc/redis_info.py: - maintainers: levonet - $modules/database/misc/redis_data_info.py: - maintainers: paginabianca - $modules/database/misc/redis_data.py: - maintainers: paginabianca - $modules/database/misc/redis_data_incr.py: - maintainers: paginabianca - $modules/database/misc/riak.py: - maintainers: drewkerrigan jsmartin - $modules/database/mssql/mssql_db.py: - maintainers: vedit Jmainguy kenichi-ogawa-1988 - labels: mssql_db - $modules/database/mssql/mssql_script.py: - maintainers: kbudde - labels: mssql_script - $modules/database/saphana/hana_query.py: - maintainers: rainerleber - $modules/database/vertica/: - maintainers: dareko - $modules/files/archive.py: - maintainers: bendoh - $modules/files/filesize.py: - maintainers: quidame - $modules/files/ini_file.py: - maintainers: jpmens noseka1 - $modules/files/iso_create.py: - maintainers: Tomorrow9 - $modules/files/iso_extract.py: - maintainers: dagwieers jhoekx ribbons - $modules/files/read_csv.py: - maintainers: dagwieers - $modules/files/sapcar_extract.py: - maintainers: RainerLeber - $modules/files/xattr.py: - maintainers: bcoca - labels: xattr - $modules/files/xml.py: - maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0 - labels: m:xml xml - ignore: magnus919 - $modules/identity/ipa/: - maintainers: $team_ipa - $modules/identity/ipa/ipa_pwpolicy.py: - maintainers: adralioh - $modules/identity/ipa/ipa_service.py: - maintainers: cprh - $modules/identity/ipa/ipa_vault.py: - maintainers: jparrill - $modules/identity/keycloak/: - maintainers: $team_keycloak - $modules/identity/keycloak/keycloak_authentication.py: - maintainers: elfelip Gaetan2907 - $modules/identity/keycloak/keycloak_clientscope.py: - maintainers: Gaetan2907 - $modules/identity/keycloak/keycloak_client_rolemapping.py: - maintainers: Gaetan2907 - $modules/identity/keycloak/keycloak_group.py: - maintainers: adamgoossens - $modules/identity/keycloak/keycloak_identity_provider.py: - maintainers: laurpaum - $modules/identity/keycloak/keycloak_realm_info.py: - maintainers: fynncfchen - $modules/identity/keycloak/keycloak_realm.py: - maintainers: kris2kris - $modules/identity/keycloak/keycloak_role.py: - maintainers: laurpaum - $modules/identity/keycloak/keycloak_user_federation.py: - maintainers: laurpaum - $modules/identity/onepassword_info.py: - maintainers: Rylon - $modules/identity/opendj/opendj_backendprop.py: - maintainers: dj-wasabi - $modules/monitoring/airbrake_deployment.py: - maintainers: phumpal - labels: airbrake_deployment - ignore: bpennypacker - $modules/monitoring/bigpanda.py: - maintainers: hkariti - $modules/monitoring/circonus_annotation.py: - maintainers: NickatEpic - $modules/monitoring/datadog/datadog_event.py: - maintainers: n0ts - labels: datadog_event - ignore: arturaz - $modules/monitoring/datadog/datadog_downtime.py: - maintainers: Datadog - $modules/monitoring/datadog/datadog_monitor.py: - maintainers: skornehl - $modules/monitoring/honeybadger_deployment.py: - maintainers: stympy - $modules/monitoring/icinga2_feature.py: - maintainers: nerzhul - $modules/monitoring/icinga2_host.py: - maintainers: t794104 - $modules/monitoring/librato_annotation.py: - maintainers: Sedward - $modules/monitoring/logentries.py: - labels: logentries - 
ignore: ivanvanderbyl - $modules/monitoring/logstash_plugin.py: - maintainers: nerzhul - $modules/monitoring/monit.py: - maintainers: dstoflet brian-brazil snopoke - labels: monit - $modules/monitoring/nagios.py: - maintainers: tbielawa tgoetheyn - $modules/monitoring/newrelic_deployment.py: - maintainers: mcodd - $modules/monitoring/pagerduty.py: - maintainers: suprememoocow thaumos - labels: pagerduty - ignore: bpennypacker - $modules/monitoring/pagerduty_alert.py: - maintainers: ApsOps - $modules/monitoring/pagerduty_change.py: - maintainers: adamvaughan - $modules/monitoring/pagerduty_user.py: - maintainers: zanssa - $modules/monitoring/pingdom.py: - maintainers: thaumos - $modules/monitoring/rollbar_deployment.py: - maintainers: kavu - $modules/monitoring/sensu/sensu_check.py: - maintainers: andsens - $modules/monitoring/sensu/: - maintainers: dmsimard - $modules/monitoring/sensu/sensu_silence.py: - maintainers: smbambling - $modules/monitoring/sensu/sensu_subscription.py: - maintainers: andsens - $modules/monitoring/spectrum_device.py: - maintainers: orgito - $modules/monitoring/spectrum_model_attrs.py: - maintainers: tgates81 - $modules/monitoring/stackdriver.py: - maintainers: bwhaley - $modules/monitoring/statsd.py: - maintainers: mamercad - $modules/monitoring/statusio_maintenance.py: - maintainers: bhcopeland - $modules/monitoring/uptimerobot.py: - maintainers: nate-kingsley - $modules/net_tools/cloudflare_dns.py: - maintainers: mgruener - labels: cloudflare_dns - $modules/net_tools/dnsimple.py: - maintainers: drcapulet - $modules/net_tools/dnsimple_info.py: - maintainers: edhilgendorf - $modules/net_tools/dnsmadeeasy.py: - maintainers: briceburg - $modules/net_tools/gandi_livedns.py: - maintainers: gthiemonge - $modules/net_tools/haproxy.py: - maintainers: ravibhure Normo - $modules/net_tools/infinity/infinity.py: - maintainers: MeganLiu - $modules/net_tools/ip_netns.py: - maintainers: bregman-arie - $modules/net_tools/ipify_facts.py: - maintainers: resmo - $modules/net_tools/ipinfoio_facts.py: - maintainers: akostyuk - $modules/net_tools/ipwcli_dns.py: - maintainers: cwollinger - $modules/net_tools/ldap/ldap_attrs.py: - maintainers: drybjed jtyr noles - $modules/net_tools/ldap/ldap_entry.py: - maintainers: jtyr - $modules/net_tools/ldap/ldap_passwd.py: - maintainers: KellerFuchs jtyr - $modules/net_tools/ldap/ldap_search.py: - maintainers: eryx12o45 jtyr - $modules/net_tools/lldp.py: - labels: lldp - ignore: andyhky - $modules/net_tools/netcup_dns.py: - maintainers: nbuchwitz - $modules/net_tools/nsupdate.py: - maintainers: nerzhul - $modules/net_tools/omapi_host.py: - maintainers: amasolov nerzhul - $modules/net_tools/pritunl/: - maintainers: Lowess - $modules/net_tools/nmcli.py: - maintainers: alcamie101 - $modules/net_tools/snmp_facts.py: - maintainers: ogenstad ujwalkomarla - $modules/notification/bearychat.py: - maintainers: tonyseek - $modules/notification/campfire.py: - maintainers: fabulops - $modules/notification/catapult.py: - maintainers: Jmainguy - $modules/notification/cisco_webex.py: - maintainers: drew-russell - $modules/notification/discord.py: - maintainers: cwollinger - $modules/notification/flowdock.py: - maintainers: mcodd - $modules/notification/grove.py: - maintainers: zimbatm - $modules/notification/hipchat.py: - maintainers: pb8226 shirou - $modules/notification/irc.py: - maintainers: jpmens sivel - $modules/notification/jabber.py: - maintainers: bcoca - $modules/notification/logentries_msg.py: - maintainers: jcftang - $modules/notification/mail.py: - 
maintainers: dagwieers - $modules/notification/matrix.py: - maintainers: jcgruenhage - $modules/notification/mattermost.py: - maintainers: bjolivot - $modules/notification/mqtt.py: - maintainers: jpmens - $modules/notification/nexmo.py: - maintainers: sivel - $modules/notification/office_365_connector_card.py: - maintainers: marc-sensenich - $modules/notification/pushbullet.py: - maintainers: willybarro - $modules/notification/pushover.py: - maintainers: weaselkeeper wopfel - $modules/notification/rocketchat.py: - maintainers: Deepakkothandan - labels: rocketchat - ignore: ramondelafuente - $modules/notification/say.py: - maintainers: $team_ansible_core mpdehaan - $modules/notification/sendgrid.py: - maintainers: makaimc - $modules/notification/slack.py: - maintainers: ramondelafuente - $modules/notification/syslogger.py: - maintainers: garbled1 - $modules/notification/telegram.py: - maintainers: tyouxa loms lomserman - $modules/notification/twilio.py: - maintainers: makaimc - $modules/notification/typetalk.py: - maintainers: tksmd - $modules/packaging/language/ansible_galaxy_install.py: - maintainers: russoz - $modules/packaging/language/bower.py: - maintainers: mwarkentin - $modules/packaging/language/bundler.py: - maintainers: thoiberg - $modules/packaging/language/cargo.py: - maintainers: radek-sprta - $modules/packaging/language/composer.py: - maintainers: dmtrs - ignore: resmo - $modules/packaging/language/cpanm.py: - maintainers: fcuny russoz - $modules/packaging/language/easy_install.py: - maintainers: mattupstate - $modules/packaging/language/gem.py: - maintainers: $team_ansible_core johanwiren - labels: gem - $modules/packaging/language/maven_artifact.py: - maintainers: tumbl3w33d turb - labels: maven_artifact - ignore: chrisisbeef - $modules/packaging/language/npm.py: - maintainers: shane-walker xcambar - labels: npm - ignore: chrishoffman - $modules/packaging/language/pear.py: - labels: pear - ignore: jle64 - $modules/packaging/language/pip_package_info.py: - maintainers: bcoca matburt maxamillion - $modules/packaging/language/pipx.py: - maintainers: russoz - $modules/packaging/language/yarn.py: - maintainers: chrishoffman verkaufer - $modules/packaging/os/apk.py: - maintainers: tdtrask - labels: apk - ignore: kbrebanov - $modules/packaging/os/apt_repo.py: - maintainers: obirvalger - $modules/packaging/os/apt_rpm.py: - maintainers: evgkrsk - $modules/packaging/os/copr.py: - maintainers: schlupov - $modules/packaging/os/dnf_versionlock.py: - maintainers: moreda - $modules/packaging/os/flatpak.py: - maintainers: $team_flatpak - $modules/packaging/os/flatpak_remote.py: - maintainers: $team_flatpak - $modules/packaging/os/pkg5: - maintainers: $team_solaris mavit - labels: pkg5 solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/packaging/os/homebrew.py: - notify: chris-short - maintainers: $team_macos andrew-d - labels: homebrew macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/homebrew_cask.py: - notify: chris-short - maintainers: $team_macos enriclluelles - labels: homebrew_ macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/homebrew_tap.py: - notify: chris-short - maintainers: $team_macos - labels: homebrew_ macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/installp.py: - maintainers: $team_aix kairoaraujo - labels: aix installp - keywords: aix 
efix lpar wpar - $modules/packaging/os/layman.py: - maintainers: jirutka - $modules/packaging/os/macports.py: - notify: chris-short - maintainers: $team_macos jcftang - labels: macos macports - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/mas.py: - maintainers: lukasbestle mheap - $modules/packaging/os/openbsd_pkg.py: - maintainers: $team_bsd eest - labels: bsd openbsd_pkg - ignore: ryansb - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/opkg.py: - maintainers: skinp - $modules/packaging/os/pacman.py: - maintainers: elasticdog indrajitr tchernomax jraby - labels: pacman - ignore: elasticdog - $modules/packaging/os/pacman_key.py: - maintainers: grawlinson - labels: pacman - $modules/packaging/os/pkgin.py: - maintainers: $team_solaris L2G jasperla szinck martinm82 - labels: pkgin solaris - $modules/packaging/os/pkgng.py: - maintainers: $team_bsd bleader - labels: bsd pkgng - ignore: bleader - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/pkgutil.py: - maintainers: $team_solaris dermute - labels: pkgutil solaris - $modules/packaging/os/portage.py: - maintainers: Tatsh wltjr - labels: portage - ignore: sayap - $modules/packaging/os/portinstall.py: - maintainers: $team_bsd berenddeboer - labels: bsd portinstall - ignore: ryansb - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/pulp_repo.py: - maintainers: sysadmind - $modules/packaging/os/redhat_subscription.py: - maintainers: barnabycourt alikins kahowell - labels: redhat_subscription - $modules/packaging/os/rhn_channel.py: - maintainers: vincentvdk alikins $team_rhn - labels: rhn_channel - $modules/packaging/os/rhn_register.py: - maintainers: jlaska $team_rhn - labels: rhn_register - $modules/packaging/os/rhsm_release.py: - maintainers: seandst - $modules/packaging/os/rhsm_repository.py: - maintainers: giovannisciortino - $modules/packaging/os/rpm_ostree_pkg.py: - maintainers: dustymabe Akasurde - $modules/packaging/os/slackpkg.py: - maintainers: KimNorgaard - $modules/packaging/os/snap.py: - maintainers: angristan vcarceler - labels: snap - $modules/packaging/os/snap_alias.py: - maintainers: russoz - labels: snap - $modules/packaging/os/sorcery.py: - maintainers: vaygr - $modules/packaging/os/svr4pkg.py: - maintainers: $team_solaris brontitall - labels: solaris svr4pkg - $modules/packaging/os/swdepot.py: - maintainers: $team_hpux melodous - labels: hpux swdepot - keywords: hp-ux - $modules/packaging/os/swupd.py: - maintainers: hnanni albertomurillo - labels: swupd - $modules/packaging/os/urpmi.py: - maintainers: pmakowski - $modules/packaging/os/xbps.py: - maintainers: dinoocch the-maldridge - $modules/packaging/os/yum_versionlock.py: - maintainers: florianpaulhoberg aminvakil - $modules/packaging/os/zypper.py: - maintainers: $team_suse - labels: zypper - ignore: dirtyharrycallahan robinro - $modules/packaging/os/zypper_repository.py: - maintainers: $team_suse - labels: zypper - ignore: matze - $modules/remote_management/cobbler/: - maintainers: dagwieers - $modules/remote_management/hpilo/: - maintainers: haad - ignore: dagwieers - $modules/remote_management/imc/imc_rest.py: - maintainers: dagwieers - labels: cisco - $modules/remote_management/ipmi/: - maintainers: bgaifullin cloudnull - $modules/remote_management/lenovoxcc/: - maintainers: panyy3 renxulei - $modules/remote_management/lxca/: - maintainers: navalkp prabhosa - 
$modules/remote_management/manageiq/: - labels: manageiq - maintainers: $team_manageiq - $modules/remote_management/manageiq/manageiq_alert_profiles.py: - maintainers: elad661 - $modules/remote_management/manageiq/manageiq_alerts.py: - maintainers: elad661 - $modules/remote_management/manageiq/manageiq_group.py: - maintainers: evertmulder - $modules/remote_management/manageiq/manageiq_tenant.py: - maintainers: evertmulder - $modules/remote_management/oneview/: - maintainers: adriane-cardozo fgbulsoni tmiotto - $modules/remote_management/oneview/oneview_datacenter_info.py: - maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr - $modules/remote_management/oneview/oneview_fc_network.py: - maintainers: fgbulsoni - $modules/remote_management/oneview/oneview_fcoe_network.py: - maintainers: fgbulsoni - $modules/remote_management/redfish/: - maintainers: $team_redfish - ignore: jose-delarosa - $modules/remote_management/stacki/stacki_host.py: - maintainers: bsanders bbyhuy - labels: stacki_host - $modules/remote_management/wakeonlan.py: - maintainers: dagwieers - $modules/source_control/bitbucket/: - maintainers: catcombo - $modules/source_control/bzr.py: - maintainers: andreparames - $modules/source_control/git_config.py: - maintainers: djmattyg007 mgedmin - $modules/source_control/github/github_deploy_key.py: - maintainers: bincyber - $modules/source_control/github/github_issue.py: - maintainers: Akasurde - $modules/source_control/github/github_key.py: - maintainers: erydo - labels: github_key - ignore: erydo - $modules/source_control/github/github_release.py: - maintainers: adrianmoisey - $modules/source_control/github/github_repo.py: - maintainers: atorrescogollo - $modules/source_control/github/: - maintainers: stpierre - $modules/source_control/gitlab/: - notify: jlozadad - maintainers: $team_gitlab - keywords: gitlab source_control - $modules/source_control/gitlab/gitlab_project_variable.py: - maintainers: markuman - $modules/source_control/gitlab/gitlab_runner.py: - maintainers: SamyCoenen - $modules/source_control/gitlab/gitlab_user.py: - maintainers: LennertMertens stgrace - $modules/source_control/gitlab/gitlab_branch.py: - maintainers: paytroff - $modules/source_control/hg.py: - maintainers: yeukhon - $modules/storage/emc/emc_vnx_sg_member.py: - maintainers: remixtj - $modules/storage/hpe3par/ss_3par_cpg.py: - maintainers: farhan7500 gautamphegde - $modules/storage/ibm/: - maintainers: tzure - $modules/storage/pmem/pmem.py: - maintainers: mizumm - $modules/storage/vexata/: - maintainers: vexata - $modules/storage/zfs/: - maintainers: $team_solaris - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/storage/zfs/zfs.py: - maintainers: johanwiren - $modules/storage/zfs/zfs_delegate_admin.py: - maintainers: natefoo - $modules/system/aix: - maintainers: $team_aix - labels: aix - keywords: aix efix lpar wpar - $modules/system/alternatives.py: - maintainers: mulby - labels: alternatives - ignore: DavidWittman - $modules/system/aix_lvol.py: - maintainers: adejoux - $modules/system/awall.py: - maintainers: tdtrask - $modules/system/beadm.py: - maintainers: $team_solaris - labels: beadm solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/system/capabilities.py: - maintainers: natefoo - $modules/system/cronvar.py: - maintainers: dougluce - $modules/system/crypttab.py: - maintainers: groks - $modules/system/dconf.py: - maintainers: 
azaghal - $modules/system/dpkg_divert.py: - maintainers: quidame - $modules/system/facter.py: - maintainers: $team_ansible_core gamethis - labels: facter - $modules/system/filesystem.py: - maintainers: pilou- abulimov quidame - labels: filesystem - $modules/system/gconftool2.py: - maintainers: Akasurde kevensen - labels: gconftool2 - $modules/system/homectl.py: - maintainers: jameslivulpi - $modules/system/interfaces_file.py: - maintainers: obourdon hryamzik - labels: interfaces_file - $modules/system/iptables_state.py: - maintainers: quidame - $modules/system/shutdown.py: - maintainers: nitzmahone samdoran aminvakil - $modules/system/java_cert.py: - maintainers: haad absynth76 - $modules/system/java_keystore.py: - maintainers: Mogztter quidame - $modules/system/kernel_blacklist.py: - maintainers: matze - $modules/system/launchd.py: - maintainers: martinm82 - $modules/system/lbu.py: - maintainers: kunkku - $modules/system/listen_ports_facts.py: - maintainers: ndavison - $modules/system/locale_gen.py: - maintainers: AugustusKling - $modules/system/lvg.py: - maintainers: abulimov - $modules/system/lvol.py: - maintainers: abulimov jhoekx zigaSRC unkaputtbar112 - $modules/system/make.py: - maintainers: LinusU - $modules/system/mksysb.py: - maintainers: $team_aix - labels: aix mksysb - $modules/system/modprobe.py: - maintainers: jdauphant mattjeffery - labels: modprobe - ignore: stygstra - $modules/system/nosh.py: - maintainers: tacatac - $modules/system/ohai.py: - maintainers: $team_ansible_core mpdehaan - labels: ohai - $modules/system/open_iscsi.py: - maintainers: srvg - $modules/system/openwrt_init.py: - maintainers: agaffney - $modules/system/osx_defaults.py: - notify: chris-short - maintainers: $team_macos notok - labels: macos osx_defaults - keywords: brew cask darwin homebrew macosx macports osx - $modules/system/pam_limits.py: - maintainers: giovannisciortino - labels: pam_limits - ignore: usawa - $modules/system/pamd.py: - maintainers: kevensen - $modules/system/parted.py: - maintainers: ColOfAbRiX rosowiecki jake2184 - $modules/system/pids.py: - maintainers: saranyasridharan - $modules/system/puppet.py: - maintainers: nibalizer emonty - labels: puppet - $modules/system/python_requirements_info.py: - maintainers: willthames - ignore: ryansb - $modules/system/runit.py: - maintainers: jsumners - $modules/system/sap_task_list_execute: - maintainers: rainerleber - $modules/system/sefcontext.py: - maintainers: dagwieers - $modules/system/selinux_permissive.py: - maintainers: mscherer - $modules/system/selogin.py: - maintainers: bachradsusi dankeder jamescassell - $modules/system/seport.py: - maintainers: dankeder - $modules/system/solaris_zone.py: - maintainers: $team_solaris pmarkham - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/system/ssh_config.py: - maintainers: gaqzi Akasurde - $modules/system/sudoers.py: - maintainers: JonEllis - $modules/system/svc.py: - maintainers: bcoca - $modules/system/syspatch.py: - maintainers: precurse - $modules/system/sysrc.py: - maintainers: dlundgren - $modules/system/sysupgrade.py: - maintainers: precurse - $modules/system/timezone.py: - maintainers: indrajitr jasperla tmshn - $modules/system/ufw.py: - notify: felixfontein - maintainers: ahtik ovcharenko pyykkis - labels: ufw - $modules/system/vdo.py: - maintainers: rhawalsh bgurney-rh - $modules/system/xfconf.py: - maintainers: russoz jbenden - labels: xfconf - $modules/system/xfconf_info.py: - maintainers: russoz - 
labels: xfconf - $modules/system/xfs_quota.py: - maintainers: bushvin - $modules/web_infrastructure/apache2_mod_proxy.py: - maintainers: oboukili - $modules/web_infrastructure/apache2_module.py: - maintainers: berendt n0trax - ignore: robinro - $modules/web_infrastructure/deploy_helper.py: - maintainers: ramondelafuente - $modules/web_infrastructure/django_manage.py: - maintainers: russoz - ignore: scottanderson42 tastychutney - labels: django_manage - $modules/web_infrastructure/ejabberd_user.py: - maintainers: privateip - $modules/web_infrastructure/gunicorn.py: - maintainers: agmezr - $modules/web_infrastructure/htpasswd.py: - maintainers: $team_ansible_core - labels: htpasswd - $modules/web_infrastructure/jboss.py: - maintainers: $team_jboss jhoekx - labels: jboss - $modules/web_infrastructure/jenkins_build.py: - maintainers: brettmilford unnecessary-username - $modules/web_infrastructure/jenkins_job.py: - maintainers: sermilrod - $modules/web_infrastructure/jenkins_job_info.py: - maintainers: stpierre - $modules/web_infrastructure/jenkins_plugin.py: - maintainers: jtyr - $modules/web_infrastructure/jenkins_script.py: - maintainers: hogarthj - $modules/web_infrastructure/jira.py: - maintainers: Slezhuk tarka pertoft - ignore: DWSR - labels: jira - $modules/web_infrastructure/nginx_status_info.py: - maintainers: resmo - $modules/web_infrastructure/rundeck_acl_policy.py: - maintainers: nerzhul - $modules/web_infrastructure/rundeck_project.py: - maintainers: nerzhul - $modules/web_infrastructure/rundeck_job_run.py: - maintainers: phsmith - $modules/web_infrastructure/rundeck_job_executions_info.py: - maintainers: phsmith - $modules/web_infrastructure/sophos_utm/: - maintainers: $team_e_spirit - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py: - maintainers: $team_e_spirit stearz - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py: - maintainers: $team_e_spirit RickS-C137 - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py: - maintainers: stearz - $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py: - maintainers: stearz - $modules/web_infrastructure/sophos_utm/utm_network_interface_address.py: - maintainers: steamx - $modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py: - maintainers: steamx - $modules/web_infrastructure/supervisorctl.py: - maintainers: inetfuture mattupstate - $modules/web_infrastructure/taiga_issue.py: - maintainers: lekum - $tests/a_module.py: - maintainers: felixfontein -######################### - tests/: - labels: tests - tests/unit/: - labels: unit - support: community - tests/integration: - labels: integration - support: community - tests/utils/: - maintainers: gundalow - labels: unit -macros: - actions: plugins/action - becomes: plugins/become - caches: plugins/cache - callbacks: plugins/callback - cliconfs: plugins/cliconf - connections: plugins/connection - doc_fragments: plugins/doc_fragments - filters: plugins/filter - inventories: plugins/inventory - lookups: plugins/lookup - module_utils: plugins/module_utils - modules: plugins/modules - terminals: plugins/terminal - tests: plugins/test - team_ansible_core: - team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross - team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo - team_consul: sgargan - team_cyberark_conjur: jvanderhoof ryanprior - team_e_spirit: 
MatrixCrawler getjack - team_flatpak: JayKayy oolongbrothers - team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit - team_hpux: bcoca davx8342 - team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 - team_ipa: Akasurde Nosmoht fxfitz justchris1 - team_jboss: Wolfant jairojunior wbrefvem - team_keycloak: eikef ndclt - team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber - team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr - team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder - team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip - team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding - team_oracle: manojmeda mross22 nalsaber - team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16 - team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 - team_rhn: FlossWare alikins barnabycourt vritant - team_scaleway: remyleone abarbare - team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l - team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor - team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso diff --git a/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index f90bd1ad..00000000 --- a/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser -blank_issues_enabled: false # default: true -contact_links: -- name: Security bug report - url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: | - Please learn how to report security vulnerabilities here. - - For all security related bugs, email security@ansible.com - instead of using this issue tracker and you will receive - a prompt response. - - For more information, see - https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html -- name: Ansible Code of Conduct - url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Be nice to other members of the community. -- name: Talks to the community - url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information - about: Please ask and answer usage questions here -- name: Working groups - url: https://github.com/ansible/community/wiki - about: Interested in improving a specific area? Become a part of a working group! 
-- name: For Enterprise - url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Red Hat offers support for the Ansible Automation Platform diff --git a/ansible_collections/community/general/.github/dependabot.yml b/ansible_collections/community/general/.github/dependabot.yml deleted file mode 100644 index 1cd41305..00000000 --- a/ansible_collections/community/general/.github/dependabot.yml +++ /dev/null @@ -1,6 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "weekly" diff --git a/ansible_collections/community/general/.github/patchback.yml b/ansible_collections/community/general/.github/patchback.yml deleted file mode 100644 index 33ad6e84..00000000 --- a/ansible_collections/community/general/.github/patchback.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -backport_branch_prefix: patchback/backports/ -backport_label_prefix: backport- -target_branch_prefix: stable- -... diff --git a/ansible_collections/community/general/.github/settings.yml b/ansible_collections/community/general/.github/settings.yml deleted file mode 100644 index 8a5b8d32..00000000 --- a/ansible_collections/community/general/.github/settings.yml +++ /dev/null @@ -1,6 +0,0 @@ -# DO NOT MODIFY - -# Settings: https://probot.github.io/apps/settings/ -# Pull settings from https://github.com/ansible-collections/.github/blob/master/.github/settings.yml - -_extends: ".github" diff --git a/ansible_collections/community/general/.github/workflows/codeql-analysis.yml b/ansible_collections/community/general/.github/workflows/codeql-analysis.yml deleted file mode 100644 index 81884ac4..00000000 --- a/ansible_collections/community/general/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: "Code scanning - action" - -on: - schedule: - - cron: '26 19 * * 1' - -jobs: - CodeQL-Build: - - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - with: - # We must fetch at least the immediate parents so that if this is - # a pull request then we can checkout the head. - fetch-depth: 2 - - # If this run was triggered by a pull request event, then checkout - # the head of the pull request instead of the merge commit. - - run: git checkout HEAD^2 - if: ${{ github.event_name == 'pull_request' }} - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v1 - # Override language selection by uncommenting this and choosing your languages - # with: - # languages: go, javascript, csharp, python, cpp, java - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v1 - - # ℹ️ Command-line programs to run using the OS shell.
- # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 diff --git a/ansible_collections/community/general/.gitignore b/ansible_collections/community/general/.gitignore deleted file mode 100644 index c6c78b42..00000000 --- a/ansible_collections/community/general/.gitignore +++ /dev/null @@ -1,446 +0,0 @@ - -# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv -# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv - -### dotenv ### -.env - -### Emacs ### -# -*- mode: gitignore; -*- -*~ -\#*\# -/.emacs.desktop -/.emacs.desktop.lock -*.elc -auto-save-list -tramp -.\#* - -# Org-mode -.org-id-locations -*_archive - -# flymake-mode -*_flymake.* - -# eshell files -/eshell/history -/eshell/lastdir - -# elpa packages -/elpa/ - -# reftex files -*.rel - -# AUCTeX auto folder -/auto/ - -# cask packages -.cask/ -dist/ - -# Flycheck -flycheck_*.el - -# server auth directory -/server/ - -# projectiles files -.projectile - -# directory configuration -.dir-locals.el - -# network security -/network-security.data - - -### Git ### -# Created by git for backups. To disable backups in Git: -# $ git config --global mergetool.keepBackup false -*.orig - -# Created by git when using merge tools for conflicts -*.BACKUP.* -*.BASE.* -*.LOCAL.* -*.REMOTE.* -*_BACKUP_*.txt -*_BASE_*.txt -*_LOCAL_*.txt -*_REMOTE_*.txt - -#!! ERROR: jupyternotebook is undefined. Use list command to see defined gitignore types !!# - -### Linux ### - -# temporary files which can be created if a process still has a handle open of a deleted file -.fuse_hidden* - -# KDE directory preferences -.directory - -# Linux trash folder which might appear on any partition or disk -.Trash-* - -# .nfs files are created when an open file is removed but is still being accessed -.nfs* - -### PyCharm+all ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff -.idea/**/workspace.xml -.idea/**/tasks.xml -.idea/**/usage.statistics.xml -.idea/**/dictionaries -.idea/**/shelf - -# AWS User-specific -.idea/**/aws.xml - -# Generated files -.idea/**/contentModel.xml - -# Sensitive or high-churn files -.idea/**/dataSources/ -.idea/**/dataSources.ids -.idea/**/dataSources.local.xml -.idea/**/sqlDataSources.xml -.idea/**/dynamic.xml -.idea/**/uiDesigner.xml -.idea/**/dbnavigator.xml - -# Gradle -.idea/**/gradle.xml -.idea/**/libraries - -# Gradle and Maven with auto-import -# When using Gradle or Maven with auto-import, you should exclude module files, -# since they will be recreated, and may cause churn. Uncomment if using -# auto-import. 
-# .idea/artifacts -# .idea/compiler.xml -# .idea/jarRepositories.xml -# .idea/modules.xml -# .idea/*.iml -# .idea/modules -# *.iml -# *.ipr - -# CMake -cmake-build-*/ - -# Mongo Explorer plugin -.idea/**/mongoSettings.xml - -# File-based project format -*.iws - -# IntelliJ -out/ - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# JIRA plugin -atlassian-ide-plugin.xml - -# Cursive Clojure plugin -.idea/replstate.xml - -# Crashlytics plugin (for Android Studio and IntelliJ) -com_crashlytics_export_strings.xml -crashlytics.properties -crashlytics-build.properties -fabric.properties - -# Editor-based Rest Client -.idea/httpRequests - -# Android studio 3.1+ serialized cache file -.idea/caches/build_file_checksums.ser - -### PyCharm+all Patch ### -# Ignores the whole .idea folder and all .iml files -# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 - -.idea/ - -# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 - -*.iml -modules.xml -.idea/misc.xml -*.ipr - -# Sonarlint plugin -.idea/sonarlint - -### pydev ### -.pydevproject - -### Python ### -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -### Vim ### -# Swap -[._]*.s[a-v][a-z] -!*.svg # comment out if you don't need vector files -[._]*.sw[a-p] -[._]s[a-rt-v][a-z] -[._]ss[a-gi-z] -[._]sw[a-p] - -# Session -Session.vim -Sessionx.vim - -# Temporary -.netrwhist -# Auto-generated tag files -tags -# Persistent undo -[._]*.un~ - -### WebStorm ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff - -# AWS User-specific - -# Generated files - -# Sensitive or high-churn files - -# Gradle - -# Gradle and Maven with auto-import -# When using Gradle or Maven with auto-import, you should exclude module files, -# since they will be recreated, and may cause churn. Uncomment if using -# auto-import. -# .idea/artifacts -# .idea/compiler.xml -# .idea/jarRepositories.xml -# .idea/modules.xml -# .idea/*.iml -# .idea/modules -# *.iml -# *.ipr - -# CMake - -# Mongo Explorer plugin - -# File-based project format - -# IntelliJ - -# mpeltonen/sbt-idea plugin - -# JIRA plugin - -# Cursive Clojure plugin - -# Crashlytics plugin (for Android Studio and IntelliJ) - -# Editor-based Rest Client - -# Android studio 3.1+ serialized cache file - -### WebStorm Patch ### -# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 - -# *.iml -# modules.xml -# .idea/misc.xml -# *.ipr - -# Sonarlint plugin -# https://plugins.jetbrains.com/plugin/7973-sonarlint -.idea/**/sonarlint/ - -# SonarQube Plugin -# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin -.idea/**/sonarIssues.xml - -# Markdown Navigator plugin -# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced -.idea/**/markdown-navigator.xml -.idea/**/markdown-navigator-enh.xml -.idea/**/markdown-navigator/ - -# Cache file creation bug -# See https://youtrack.jetbrains.com/issue/JBR-2257 -.idea/$CACHE_FILE$ - -# CodeStream plugin -# https://plugins.jetbrains.com/plugin/12206-codestream -.idea/codestream.xml - -### Windows ### -# Windows thumbnail cache files -Thumbs.db -Thumbs.db:encryptable -ehthumbs.db -ehthumbs_vista.db - -# Dump file -*.stackdump - -# Folder config file -[Dd]esktop.ini - -# Recycle Bin used on file shares -$RECYCLE.BIN/ - -# Windows Installer files -*.cab -*.msi -*.msix -*.msm -*.msp - -# Windows shortcuts -*.lnk - -# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/ansible_collections/community/general/CHANGELOG.rst b/ansible_collections/community/general/CHANGELOG.rst deleted file mode 100644 index 78a59b7f..00000000 --- a/ansible_collections/community/general/CHANGELOG.rst +++ /dev/null @@ -1,929 +0,0 @@ -=============================== -Community General Release Notes -=============================== - -.. contents:: Topics - -This changelog describes changes after version 3.0.0. 
- -v4.6.1 -====== - -Release Summary ---------------- - -Extraordinary bugfix release to fix a breaking change in ``terraform``. - -Bugfixes --------- - -- lxd inventory plugin - do not crash if OS and release metadata are not present - (https://github.com/ansible-collections/community.general/pull/4351). -- terraform - revert bugfix https://github.com/ansible-collections/community.general/pull/4281 that tried to fix ``variable`` handling to allow complex values. It turned out that this was breaking several valid use-cases (https://github.com/ansible-collections/community.general/issues/4367, https://github.com/ansible-collections/community.general/pull/4370). - -v4.6.0 -====== - -Release Summary ---------------- - -Regular feature and bugfix release. - -Minor Changes -------------- - -- jira - when creating a comment, ``fields`` now is used for additional data (https://github.com/ansible-collections/community.general/pull/4304). -- ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613). -- mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295). -- nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless`` (https://github.com/ansible-collections/community.general/pull/4108). -- nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858). -- npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299). -- pacman - add ``remove_nosave`` parameter to avoid saving modified configuration files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316, https://github.com/ansible-collections/community.general/issues/4315). -- pacman - now implements proper change detection for ``update_cache=true``. Adds ``cache_updated`` return value to when ``update_cache=true`` to report this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337). -- pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300). -- proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553). -- redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``, and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207). -- syslog_json - add option to skip logging of ``gather_facts`` playbook tasks; use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223). -- zypper - add support for ``--clean-deps`` option to remove packages that depend on a package being removed (https://github.com/ansible-collections/community.general/pull/4195). - -Deprecated Features -------------------- - -- pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache`` will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep the old behavior, add something like ``register: result`` and ``changed_when: result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329). 
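For reference, the workaround suggested in the deprecation entry above could look like the following sketch (combining ``update_cache`` with ``upgrade``; the task name and values are illustrative):

.. code-block:: yaml

   - name: Upgrade packages, reporting changed only for real package changes (sketch)
     community.general.pacman:
       update_cache: true
       upgrade: true
     register: result
     changed_when: result.packages | length > 0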
- -Bugfixes -------- - -- filesize - add support for busybox dd implementation, which is used by default on Alpine Linux (https://github.com/ansible-collections/community.general/pull/4288, https://github.com/ansible-collections/community.general/issues/4259). -- linode inventory plugin - fix configuration handling relating to inventory filtering (https://github.com/ansible-collections/community.general/pull/4336). -- mksysb - fixed bug where parameter ``backup_dmapi_fs`` was passing the wrong CLI argument (https://github.com/ansible-collections/community.general/pull/3295). -- pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312). -- pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286, https://github.com/ansible-collections/community.general/issues/4285). -- pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275, https://github.com/ansible-collections/community.general/issues/4274). -- pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade`` is specified, even if nothing is done (https://github.com/ansible-collections/community.general/pull/4329). -- pacman - when the ``update_cache`` option is combined with another option such as ``upgrade``, report ``changed`` based on the actions performed by the latter option. This was the behavior in community.general 4.4.0 and before. In community.general 4.5.0, a task combining these options would always report ``changed`` (https://github.com/ansible-collections/community.general/pull/4318). -- proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]`` form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349). -- proxmox inventory plugin - fixed the ``description`` field being ignored if it contained a comma (https://github.com/ansible-collections/community.general/issues/4348). -- proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306). -- proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287). -- terraform - fix ``variable`` handling to allow complex values (https://github.com/ansible-collections/community.general/pull/4281). - -Known Issues ------------ - -- pacman - ``update_cache`` cannot differentiate between up to date and outdated package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318). -- pacman - binaries specified in the ``executable`` parameter must support ``--print-format`` in order to be used by this module. In particular, AUR helper ``yay`` is known not to currently support it (https://github.com/ansible-collections/community.general/pull/4312). - -v4.5.0 ====== - -Release Summary --------------- - -Regular feature and bugfix release. - -Minor Changes ------------- - -- Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9. This fixes some instances added since the last time this was fixed (https://github.com/ansible-collections/community.general/pull/4232). -- ansible_galaxy_install - added option ``no_deps`` to the module (https://github.com/ansible-collections/community.general/issues/4174).
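A minimal sketch of the new ``no_deps`` option (the ``type`` and ``name`` values are illustrative, not from this changelog):

.. code-block:: yaml

   - name: Install a collection without pulling in its dependencies (sketch)
     community.general.ansible_galaxy_install:
       type: collection
       name: community.crypto
       no_deps: true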
-- gitlab_group_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/pull/4038 and https://github.com/ansible-collections/community.general/issues/4074). -- keycloak_* modules - added connection timeout parameter when calling the server (https://github.com/ansible-collections/community.general/pull/4168). -- linode inventory plugin - add support for caching inventory results (https://github.com/ansible-collections/community.general/pull/4179). -- opentelemetry_plugin - enrich service when using the ``jenkins``, ``hetzner`` or ``jira`` modules (https://github.com/ansible-collections/community.general/pull/4105). -- pacman - the module has been rewritten and is now much faster when using ``state=latest``. Operations are now done on all packages at once instead of package by package, and the configured output format of ``pacman`` no longer affects the module's operation. (https://github.com/ansible-collections/community.general/pull/3907, https://github.com/ansible-collections/community.general/issues/3783, https://github.com/ansible-collections/community.general/issues/4079) -- passwordstore lookup plugin - add configurable ``lock`` and ``locktimeout`` options to avoid race conditions in itself and in the ``pass`` utility it calls. By default, the plugin now locks on write operations (https://github.com/ansible-collections/community.general/pull/4194). -- proxmox modules - move common code into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4029). -- proxmox_kvm - added EFI disk support when creating a VM with OVMF UEFI BIOS with the new ``efidisk0`` option (https://github.com/ansible-collections/community.general/pull/4106, https://github.com/ansible-collections/community.general/issues/1638). -- proxmox_kvm - add ``win11`` to ``ostype`` parameter for Windows 11 and Windows Server 2022 support (https://github.com/ansible-collections/community.general/issues/4023, https://github.com/ansible-collections/community.general/pull/4191). - -Bugfixes -------- - -- dconf - skip processes that disappeared while we inspected them (https://github.com/ansible-collections/community.general/issues/4151). -- gitlab_group_variable - add missing documentation about GitLab versions that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/pull/4038). -- gitlab_group_variable - allow setting the same variable name under different environment scopes. Due to this change, the return value ``group_variable`` differs from previous versions in check mode. It was counting ``updated`` values, because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/pull/4038). -- gitlab_group_variable - fix idempotent change behaviour for float and integer variables (https://github.com/ansible-collections/community.general/pull/4038). -- gitlab_project_variable - ``value`` is not necessary when deleting variables (https://github.com/ansible-collections/community.general/pull/4150). -- gitlab_runner - make ``project`` and ``owned`` mutually exclusive (https://github.com/ansible-collections/community.general/pull/4136). -- homebrew_cask - fix force install operation (https://github.com/ansible-collections/community.general/issues/3703). -- imc_rest - fixes the module failure due to the usage of ``itertools.izip_longest`` which is not available in Python 3 (https://github.com/ansible-collections/community.general/issues/4206).
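A sketch of the new ``variables`` parameter of ``gitlab_group_variable`` listed under Minor Changes above (URL, token, group, and values are placeholders):

.. code-block:: yaml

   - name: Set several group CI/CD variables in one task (sketch)
     community.general.gitlab_group_variable:
       api_url: https://gitlab.example.com
       api_token: "{{ gitlab_token }}"
       group: my-org/my-group
       variables:
         - name: ACCESS_KEY_ID
           value: abc123
         - name: SECRET_ACCESS_KEY
           value: "{{ secret_access_key }}"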
-- ini_file - when removing nothing, do not report ``changed`` (https://github.com/ansible-collections/community.general/issues/4154). -- keycloak_user_federation - creating a user federation while specifying an ID (that does not exist yet) no longer fails with a 404 Not Found (https://github.com/ansible-collections/community.general/pull/4212). -- keycloak_user_federation - mappers auto-created by keycloak are matched and merged by their name and no longer create duplicate entries (https://github.com/ansible-collections/community.general/pull/4212). -- mail callback plugin - fix encoding of the name of sender and recipient (https://github.com/ansible-collections/community.general/issues/4060, https://github.com/ansible-collections/community.general/pull/4061). -- passwordstore lookup plugin - fix error detection for non-English locales (https://github.com/ansible-collections/community.general/pull/4219). -- passwordstore lookup plugin - prevent returning path names as passwords by accident (https://github.com/ansible-collections/community.general/issues/4185, https://github.com/ansible-collections/community.general/pull/4192). -- vdo - fix options error (https://github.com/ansible-collections/community.general/pull/4163). -- yum_versionlock - fix matching of existing entries with names passed to the module. Match yum and dnf lock format (https://github.com/ansible-collections/community.general/pull/4183). - -New Modules ----------- - -Cloud ~~~~~ - -scaleway ^^^^^^^^ - -- scaleway_private_network - Scaleway private network management - -Storage ~~~~~~~ - -pmem ^^^^ - -- pmem - Configure Intel Optane Persistent Memory modules - -v4.4.0 ====== - -Release Summary --------------- - -Regular feature and bugfix release. - -Minor Changes ------------- - -- cobbler inventory plugin - add ``include_profiles`` option (https://github.com/ansible-collections/community.general/pull/4068). -- gitlab_project_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/issues/4038). -- icinga2 inventory plugin - implemented constructed interface (https://github.com/ansible-collections/community.general/pull/4088). -- linode inventory plugin - allow templating of ``access_token`` variable in Linode inventory plugin (https://github.com/ansible-collections/community.general/pull/4040). -- lists_mergeby filter plugin - add parameters ``list_merge`` and ``recursive``. These are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/4058). -- lxc_container - added ``wait_for_container`` parameter. If ``true``, the module will wait until the running task reports success as the status (https://github.com/ansible-collections/community.general/pull/4039). -- mail callback plugin - add ``Message-ID`` and ``Date`` headers (https://github.com/ansible-collections/community.general/issues/4055, https://github.com/ansible-collections/community.general/pull/4056). -- mail callback plugin - properly use Ansible's option handling to split lists (https://github.com/ansible-collections/community.general/pull/4140). -- nmcli - adds ``routes6`` and ``route_metric6`` parameters for supporting IPv6 routes (https://github.com/ansible-collections/community.general/issues/4059). -- opennebula - add the release action for VMs in the ``HOLD`` state (https://github.com/ansible-collections/community.general/pull/4036).
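A sketch of the ``routes6`` and ``route_metric6`` parameters added above (connection name and addresses are placeholders):

.. code-block:: yaml

   - name: Add a static IPv6 route to an existing connection (sketch)
     community.general.nmcli:
       conn_name: eth0
       type: ethernet
       routes6:
         - "fd00:10::/64 fd00::1"
       route_metric6: 100
       state: present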
-- opentelemetry_plugin - enrich service when using the ``docker_login`` module (https://github.com/ansible-collections/community.general/pull/4104). -- proxmox modules - move ``HAS_PROXMOXER`` check into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4030). -- scaleway inventory plugin - add profile parameter ``scw_profile`` (https://github.com/ansible-collections/community.general/pull/4049). -- snap - add option ``options`` permitting to set options using the ``snap set`` command (https://github.com/ansible-collections/community.general/pull/3943). - -Deprecated Features ------------------- - -- mail callback plugin - not specifying ``sender`` is deprecated and will be disallowed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4140). - -Bugfixes -------- - -- cargo - fix detection of outdated packages when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/4052). -- cargo - fix incorrectly reported changed status for packages with a name containing a hyphen (https://github.com/ansible-collections/community.general/issues/4044, https://github.com/ansible-collections/community.general/pull/4052). -- gitlab_project_variable - add missing documentation about GitLab versions that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/issues/4038). -- gitlab_project_variable - allow setting the same variable name under different environment scopes. Due to this change, the return value ``project_variable`` differs from previous versions in check mode. It was counting ``updated`` values, because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/issues/4038). -- gitlab_project_variable - fix idempotent change behaviour for float and integer variables (https://github.com/ansible-collections/community.general/issues/4038). -- gitlab_runner - use correct API endpoint to create and retrieve project-level runners when using ``project`` (https://github.com/ansible-collections/community.general/pull/3965). -- listen_ports_facts - local port regex did not handle IPv6-only binding well. Fixes the regex for ``ss`` (https://github.com/ansible-collections/community.general/pull/4092). -- mail callback plugin - fix crash on Python 3 (https://github.com/ansible-collections/community.general/issues/4025, https://github.com/ansible-collections/community.general/pull/4026). -- opentelemetry - fix generating a trace with a task containing ``no_log: true`` (https://github.com/ansible-collections/community.general/pull/4043). -- python_requirements_info - store ``mismatched`` return values per package as documented in the module (https://github.com/ansible-collections/community.general/pull/4078). -- yarn - fix incorrect handling of ``yarn list`` and ``yarn global list`` output that could result in a fatal error (https://github.com/ansible-collections/community.general/pull/4050). -- yarn - fix incorrectly reported status when installing a package globally (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4050). -- yarn - fix missing ``~`` expansion in yarn global install folder which resulted in incorrect task status (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4048).
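A sketch of a global install with the ``yarn`` module, whose status reporting is fixed above (the package name is illustrative):

.. code-block:: yaml

   - name: Install a package globally with yarn (sketch)
     community.general.yarn:
       name: typescript
       global: true
       state: present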
- -New Modules ------------ - -System -~~~~~~ - -- homectl - Manage user accounts with systemd-homed - -v4.3.0 -====== - -Release Summary ---------------- - -Regular feature and bugfix release. - -Minor Changes -------------- - -- ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374). -- ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374). -- ipmi_power - add ``machine`` option to ensure the power state via the remote target address (https://github.com/ansible-collections/community.general/pull/3968). -- mattermost - add the possibility to send attachments instead of text messages (https://github.com/ansible-collections/community.general/pull/3946). -- nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985). -- proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930). -- puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff`` is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980). -- scaleway_compute - add possibility to use project identifier (new ``project`` option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951). -- scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964). - -Bugfixes --------- - -- Various modules and plugins - use vendored version of ``distutils.version`` instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936). -- alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976). -- jail connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934). -- lxd connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934). -- passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool`` with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``, ``returnall``, ``overwrite``, 'backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934). -- say callback plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables (https://github.com/ansible-collections/community.general/pull/3934). -- scaleway_user_data - fix double-quote added where no double-quote is needed to user data in scaleway's server (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940). -- slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932). 
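A sketch of the ``dynamicupdate`` boolean change listed under Minor Changes above (connection parameters are placeholders):

.. code-block:: yaml

   - name: Ensure a DNS zone permits dynamic updates (sketch)
     community.general.ipa_dnszone:
       zone_name: example.com
       dynamicupdate: true
       state: present
       ipa_host: ipa.example.com
       ipa_user: admin
       ipa_pass: "{{ ipa_password }}"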
-- zone connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934). - -New Plugins ----------- - -Filter ~~~~~~ - -- counter - Counts hashable elements in a sequence - -New Modules ----------- - -Identity ~~~~~~~~ - -keycloak ^^^^^^^^ - -- keycloak_realm_info - Allows obtaining Keycloak realm public information via Keycloak API - -Packaging ~~~~~~~~~ - -language ^^^^^^^^ - -- cargo - Manage Rust packages with cargo - -System ~~~~~~ - -- sudoers - Manage sudoers files - -v4.2.0 ====== - -Release Summary --------------- - -Regular bugfix and feature release. - -Minor Changes ------------- - -- aix_filesystem - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3833). -- aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3834). -- gitlab - add more token authentication support with the new options ``api_oauth_token`` and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705). -- gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792). -- gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme = true``) (https://github.com/ansible-collections/community.general/pull/3792). -- hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840). -- icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). -- icinga2 inventory plugin - inventory object names can be changed by setting ``inventory_attr`` in your config file to the host object's name, address, or display_name field (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). -- ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3822). -- iso_extract - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3805). -- java_cert - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3835). -- jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838). -- keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767). -- logentries - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3807). -- logstash_plugin - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3808). -- lxc_container - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3851).
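The new ``counter`` filter listed above can be exercised like this (values are illustrative):

.. code-block:: yaml

   - name: Count occurrences of each element in a list (sketch)
     ansible.builtin.debug:
       msg: "{{ ['a', 'b', 'a', 'c'] | community.general.counter }}"
       # expected output: {'a': 2, 'b': 1, 'c': 1}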
-- lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``, and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798). -- lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519). -- module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns`` for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849). -- monit - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3821). -- nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088). -- nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357). -- python_requirements_info - returns python version broken down into its components, and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797). -- svc - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3829). -- xattr - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3806). -- xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919). - -Deprecated Features -------------------- - -- module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict`` (https://github.com/ansible-collections/community.general/pull/3801). - -Bugfixes --------- - -- icinga2 inventory plugin - handle 404 error when filter produces no results (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). -- interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841). -- jira - fixed bug where module returns error related to dictionary key ``body`` (https://github.com/ansible-collections/community.general/issues/3419). -- nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses on task rerun (https://github.com/ansible-collections/community.general/issues/3768). -- nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086). -- nrdp callback plugin - fix error ``string arguments without an encoding`` (https://github.com/ansible-collections/community.general/issues/3903). -- opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead of reporting an error (https://github.com/ansible-collections/community.general/pull/3837). -- pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791). -- proxmox - fixed ``onboot`` parameter causing module failures when undefined (https://github.com/ansible-collections/community.general/issues/3844). -- python_requirements_info - fails if version operator used without version (https://github.com/ansible-collections/community.general/pull/3785). 
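A sketch of the multiple-address support for the ``ip6`` parameter noted above (connection name and addresses are placeholders):

.. code-block:: yaml

   - name: Assign several IPv6 addresses to one connection (sketch)
     community.general.nmcli:
       conn_name: eth0
       type: ethernet
       ip6:
         - fd00::10/64
         - fd00::11/64
       state: present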
- -New Modules ------------ - -Net Tools -~~~~~~~~~ - -- dnsimple_info - Pull basic info from DNSimple API - -Remote Management -~~~~~~~~~~~~~~~~~ - -redfish -^^^^^^^ - -- ilo_redfish_config - Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions -- ilo_redfish_info - Gathers server information through iLO using Redfish APIs - -Source Control -~~~~~~~~~~~~~~ - -gitlab -^^^^^^ - -- gitlab_branch - Create or delete a branch - -v4.1.0 -====== - -Release Summary ---------------- - -Regular bugfix and feature release. - -Minor Changes -------------- - -- gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694). -- ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698). -- ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698). -- listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708). -- lxd_container - adds ``type`` option which also allows to operate on virtual machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661). -- nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088, https://github.com/ansible-collections/community.general/pull/3738). -- open_iscsi - extended module to allow rescanning of established session for one or all targets (https://github.com/ansible-collections/community.general/issues/3763). -- pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758). -- redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish Host Interface information (https://github.com/ansible-collections/community.general/issues/3693). -- redfish_command - add ``SetHostInterface`` command to enable configuring the Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632). - -Bugfixes --------- - -- github_repo - ``private`` and ``description`` attributes should not be set to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386). -- terraform - fix command options being ignored during planned/plan in function ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707, https://github.com/ansible-collections/community.general/pull/3726). - -New Plugins ------------ - -Inventory -~~~~~~~~~ - -- xen_orchestra - Xen Orchestra inventory source - -Lookup -~~~~~~ - -- revbitspss - Get secrets from RevBits PAM server - -v4.0.2 -====== - -Release Summary ---------------- - -Bugfix release for today's Ansible 5.0.0 beta 2. - -Deprecated Features -------------------- - -- Support for Ansible 2.9 and ansible-base 2.10 is deprecated, and will be removed in the next major release (community.general 5.0.0) next spring. While most content will probably still work with ansible-base 2.10, we will remove symbolic links for modules and action plugins, which will make it impossible to use them with Ansible 2.9 anymore. 
Please use community.general 4.x.y with Ansible 2.9 and ansible-base 2.10, as these releases will continue to support Ansible 2.9 and ansible-base 2.10 even after they are End of Life (https://github.com/ansible-community/community-topics/issues/50, https://github.com/ansible-collections/community.general/pull/3723). - -Bugfixes --------- - -- counter_enabled callback plugin - fix output to correctly display host and task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709). -- ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619). -- lvol - allows logical volumes to be created with certain size arguments prefixed with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665). -- nmcli - fixed falsely reported changed status when ``mtu`` is omitted with ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612, https://github.com/ansible-collections/community.general/pull/3625). - -v4.0.1 -====== - -Release Summary ---------------- - -Bugfix release for today's Ansible 5.0.0 beta 1. - -Bugfixes --------- - -- a_module test plugin - fix crash when testing a module name that was tombstoned (https://github.com/ansible-collections/community.general/pull/3660). -- xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError`` due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673). - -v4.0.0 -====== - -Release Summary ---------------- - -This is release 4.0.0 of ``community.general``, released on 2021-11-02. - -Major Changes -------------- - -- bitbucket_* modules - ``client_id`` is no longer marked as ``no_log=true``. If you relied on its value not showing up in logs and output, please mark the whole tasks with ``no_log: true`` (https://github.com/ansible-collections/community.general/pull/2045). - -Minor Changes -------------- - -- Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/2877). -- ModuleHelper module utils - improved mechanism for customizing the calculation of ``changed`` (https://github.com/ansible-collections/community.general/pull/2514). -- Remove unnecessary ``__init__.py`` files from ``plugins/`` (https://github.com/ansible-collections/community.general/pull/2632). -- apache2_module - minor refactoring improving code quality, readability and speed (https://github.com/ansible-collections/community.general/pull/3106). -- archive - added ``dest_state`` return value to describe final state of ``dest`` after successful task execution (https://github.com/ansible-collections/community.general/pull/2913). -- archive - added ``exclusion_patterns`` option to exclude files or subdirectories from archives (https://github.com/ansible-collections/community.general/pull/2616). -- archive - refactoring prior to fix for idempotency checks. The fix will be a breaking change and only appear in community.general 4.0.0 (https://github.com/ansible-collections/community.general/pull/2987). -- bitbucket_* modules - add ``user`` and ``password`` options for Basic authentication (https://github.com/ansible-collections/community.general/pull/2045). -- chroot connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). 
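A sketch of the ``exclusion_patterns`` option added to ``archive`` above (paths and patterns are illustrative):

.. code-block:: yaml

   - name: Archive an application directory, excluding Python bytecode (sketch)
     community.general.archive:
       path: /var/www/app
       dest: /tmp/app.tar.gz
       exclusion_patterns:
         - '*.pyc'
         - '__pycache__*'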
-- cloud_init_data_facts - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). -- cmd (Module Helper) module utils - ``CmdMixin`` now pulls the value for ``run_command()`` params from ``self.vars``, as opposed to previously retrieving those from ``self.module.params`` (https://github.com/ansible-collections/community.general/pull/2517). -- composer - add ``composer_executable`` option (https://github.com/ansible-collections/community.general/issues/2649). -- datadog_event - adding parameter ``api_host`` to allow selecting a datadog API endpoint instead of using the default one (https://github.com/ansible-collections/community.general/issues/2774, https://github.com/ansible-collections/community.general/pull/2775). -- datadog_monitor - allow creation of composite datadog monitors (https://github.com/ansible-collections/community.general/issues/2956). -- dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247). -- dnsimple - module rewrite to include support for python-dnsimple>=2.0.0; also add ``sandbox`` parameter (https://github.com/ansible-collections/community.general/pull/2946). -- elastic callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3556). -- filesystem - cleanup and revamp module, tests and doc. Pass all commands to ``module.run_command()`` as lists. Move the device-vs-mountpoint logic to ``grow()`` method. Give to all ``get_fs_size()`` the same logic and error handling. (https://github.com/ansible-collections/community.general/pull/2472). -- filesystem - extend support for FreeBSD. Avoid potential data loss by checking existence of a filesystem with ``fstyp`` (native command) if ``blkid`` (foreign command) doesn't find one. Add support for character devices and ``ufs`` filesystem type (https://github.com/ansible-collections/community.general/pull/2902). -- flatpak - add ``no_dependencies`` parameter (https://github.com/ansible/ansible/pull/55452, https://github.com/ansible-collections/community.general/pull/2751). -- flatpak - allows installing or uninstalling a list of packages (https://github.com/ansible-collections/community.general/pull/2521). -- funcd connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). -- gem - add ``bindir`` option to specify an installation path for executables such as ``/home/user/bin`` or ``/home/user/.local/bin`` (https://github.com/ansible-collections/community.general/pull/2837). -- gem - add ``norc`` option to avoid loading any ``.gemrc`` file (https://github.com/ansible-collections/community.general/pull/2837). -- github_repo - add new option ``api_url`` to allow working with on premises installations (https://github.com/ansible-collections/community.general/pull/3038). -- gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``, ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248). -- gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367). -- gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047). 
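A sketch of passing a list to ``gitlab_user`` as described in the ``gitlab_group_members`` entries above (URL, token, and names are placeholders):

.. code-block:: yaml

   - name: Ensure several users are members of a group (sketch)
     community.general.gitlab_group_members:
       api_url: https://gitlab.example.com
       api_token: "{{ gitlab_token }}"
       gitlab_group: my-group
       gitlab_user:
         - alice
         - bob
       access_level: developer
       state: present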
-- gitlab_group_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3047). -- gitlab_project - add new options ``allow_merge_on_skipped_pipeline``, ``only_allow_merge_if_all_discussions_are_resolved``, ``only_allow_merge_if_pipeline_succeeds``, ``packages_enabled``, ``remove_source_branch_after_merge``, ``squash_option`` (https://github.com/ansible-collections/community.general/pull/3002). -- gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled`` (https://github.com/ansible-collections/community.general/pull/3379). -- gitlab_project - projects can be created under other users' namespaces with the new ``username`` option (https://github.com/ansible-collections/community.general/pull/2824). -- gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319). -- gitlab_project_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3319). -- gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634). -- gitlab_user - add ``expires_at`` option (https://github.com/ansible-collections/community.general/issues/2325). -- gitlab_user - add functionality for adding external identity providers to a GitLab user (https://github.com/ansible-collections/community.general/pull/2691). -- gitlab_user - allow resetting an existing password with the new ``reset_password`` option (https://github.com/ansible-collections/community.general/pull/2691). -- gitlab_user - specifying a password is no longer necessary (https://github.com/ansible-collections/community.general/pull/2691). -- gunicorn - search for ``gunicorn`` binary in more paths (https://github.com/ansible-collections/community.general/pull/3092). -- hana_query - added the ability to use hdbuserstore (https://github.com/ansible-collections/community.general/pull/3125). -- hpilo_info - added ``host_power_status`` return value to report power state of machine with ``OFF``, ``ON`` or ``UNKNOWN`` (https://github.com/ansible-collections/community.general/pull/3079). -- idrac_redfish_config - modified set_manager_attributes function to skip invalid attribute instead of returning. Added skipped attributes to output. Modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). -- influxdb_retention_policy - add ``state`` parameter with allowed values ``present`` and ``absent`` to support deletion of existing retention policies (https://github.com/ansible-collections/community.general/issues/2383). -- influxdb_retention_policy - simplify duration logic parsing (https://github.com/ansible-collections/community.general/pull/2385). -- ini_file - add ability to define multiple options with the same name but different values (https://github.com/ansible-collections/community.general/issues/273, https://github.com/ansible-collections/community.general/issues/1204). -- ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove single ``option=value`` entries without overwriting existing options with the same name but different values (https://github.com/ansible-collections/community.general/pull/3033). -- ini_file - opening file with encoding ``utf-8-sig`` (https://github.com/ansible-collections/community.general/issues/2189).
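A sketch of the ``exclusive`` option of ``ini_file`` described above, keeping an existing value while adding another one (file path and values are illustrative):

.. code-block:: yaml

   - name: Add a second value for the same option without removing the first (sketch)
     community.general.ini_file:
       path: /etc/app/app.ini
       section: drinks
       option: beverage
       value: pinot noir
       exclusive: false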
-- interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328). -- iocage connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). -- ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user map order (https://github.com/ansible-collections/community.general/pull/3178). -- ipa_group - add ``append`` option for adding group and users members, instead of replacing the respective lists (https://github.com/ansible-collections/community.general/pull/3545). -- jail connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). -- java_keystore - added ``ssl_backend`` parameter for using the cryptography library instead of the OpenSSL binary (https://github.com/ansible-collections/community.general/pull/2485). -- java_keystore - replace envvar by stdin to pass secret to ``keytool`` (https://github.com/ansible-collections/community.general/pull/2526). -- jenkins_build - support stopping a running jenkins build (https://github.com/ansible-collections/community.general/pull/2850). -- jenkins_job_info - the ``password`` and ``token`` parameters can also be omitted to retrieve only public information (https://github.com/ansible-collections/community.general/pull/2948). -- jenkins_plugin - add fallback url(s) for failure of plugin installation/download (https://github.com/ansible-collections/community.general/pull/1334). -- jira - add comment visibility parameter for comment operation (https://github.com/ansible-collections/community.general/pull/2556). -- kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329). -- keycloak_* modules - refactor many of the ``keycloak_*`` modules to have similar structures, comments, and documentation (https://github.com/ansible-collections/community.general/pull/3280). -- keycloak_authentication - enhanced diff mode to also return before and after state when the authentication flow is updated (https://github.com/ansible-collections/community.general/pull/2963). -- keycloak_client - add ``authentication_flow_binding_overrides`` option (https://github.com/ansible-collections/community.general/pull/2949). -- keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation of login events (https://github.com/ansible-collections/community.general/pull/3231). -- linode - added proper traceback when failing due to exceptions (https://github.com/ansible-collections/community.general/pull/2410). -- linode - parameter ``additional_disks`` is now validated as a list of dictionaries (https://github.com/ansible-collections/community.general/pull/2410). -- linode inventory plugin - adds the ``ip_style`` configuration key. Set to ``api`` to get more detailed network details back from the remote Linode host (https://github.com/ansible-collections/community.general/pull/3203). -- lxc connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). -- lxd_container - add ``ignore_volatile_options`` option which allows to disable the behavior that the module ignores options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331). -- mail - added the ``ehlohost`` parameter which allows for manual override of the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425). 
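A sketch of the ``ehlohost`` override added to the ``mail`` module above (host names and addresses are placeholders):

.. code-block:: yaml

   - name: Send a notification mail with an explicit EHLO host (sketch)
     community.general.mail:
       host: smtp.example.com
       ehlohost: ansible.example.com
       to: ops@example.com
       subject: Deployment finished
       body: The playbook completed successfully.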
-- maven_artifact - added ``checksum_alg`` option to support SHA1 checksums in order to support FIPS systems (https://github.com/ansible-collections/community.general/pull/2662). -- module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``, to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290). -- module_helper module utils - added feature flag parameter to ``CmdMixin`` to control whether ``cmd_args`` is automatically added to the module output (https://github.com/ansible-collections/community.general/pull/3648). -- module_helper module utils - added feature flag parameters to ``CmdMixin`` to control whether ``rc``, ``out`` and ``err`` are automatically added to the module output (https://github.com/ansible-collections/community.general/pull/2922). -- module_helper module utils - break down of the long file into smaller pieces (https://github.com/ansible-collections/community.general/pull/2393). -- module_helper module utils - method ``CmdMixin.run_command()`` now accepts ``process_output`` specifying a function to process the outcome of the underlying ``module.run_command()`` (https://github.com/ansible-collections/community.general/pull/2564). -- module_helper module_utils - added classmethod to trigger the execution of MH modules (https://github.com/ansible-collections/community.general/pull/3206). -- nmcli - add ``disabled`` value to ``method6`` option (https://github.com/ansible-collections/community.general/issues/2730). -- nmcli - add ``dummy`` interface support (https://github.com/ansible-collections/community.general/issues/724). -- nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105, https://github.com/ansible-collections/community.general/pull/3262). -- nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313). -- nmcli - add ``routing_rules4`` and ``may_fail4`` options (https://github.com/ansible-collections/community.general/issues/2730). -- nmcli - add ``runner`` and ``runner_hwaddr_policy`` options (https://github.com/ansible-collections/community.general/issues/2901). -- nmcli - add ``wifi-sec`` option change detection to support managing secure Wi-Fi connections (https://github.com/ansible-collections/community.general/pull/3136). -- nmcli - add ``wifi`` option to support managing Wi-Fi settings such as ``hidden`` or ``mode`` (https://github.com/ansible-collections/community.general/pull/3081). -- nmcli - add new options to ignore automatic DNS servers and gateways (https://github.com/ansible-collections/community.general/issues/1087). -- nmcli - query ``nmcli`` directly to determine available WiFi options (https://github.com/ansible-collections/community.general/pull/3141). -- nmcli - remove dead code, ``options`` never contains keys from ``param_alias`` (https://github.com/ansible-collections/community.general/pull/2417). -- nmcli - the option ``routing_rules4`` can now be specified as a list of strings, instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401). -- nrdp callback plugin - parameters are now converted to strings, except ``validate_certs`` which is converted to boolean (https://github.com/ansible-collections/community.general/pull/2878). -- onepassword lookup plugin - add ``domain`` option (https://github.com/ansible-collections/community.general/issues/2734). 
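For example, the new ``dummy`` interface support in ``nmcli`` noted above could be exercised with a task like this sketch; connection name and address are illustrative only::

  - name: Create a dummy interface with a static address
    community.general.nmcli:
      conn_name: dummy0
      ifname: dummy0
      type: dummy  # interface type added in this release
      ip4: 192.168.50.1/24
      state: present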
-- open-iscsi - adding support for mutual authentication between target and initiator (https://github.com/ansible-collections/community.general/pull/3422). -- open_iscsi - add ``auto_portal_startup`` parameter to allow ``node.startup`` setting per portal (https://github.com/ansible-collections/community.general/issues/2685). -- open_iscsi - also consider ``portal`` and ``port`` to check if already logged in or not (https://github.com/ansible-collections/community.general/issues/2683). -- open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286). -- opentelemetry callback plugin - added option ``enable_from_environment`` to support enabling the plugin only if the given environment variable exists and it is set to true (https://github.com/ansible-collections/community.general/pull/3498). -- opentelemetry callback plugin - enriched the span attributes with HTTP metadata for those Ansible tasks that interact with third party systems (https://github.com/ansible-collections/community.general/pull/3448). -- opentelemetry callback plugin - enriched the stacktrace information for loops with the ``message``, ``exception`` and ``stderr`` fields from the failed item in the tasks in addition to the name of the task and failed item (https://github.com/ansible-collections/community.general/pull/3599). -- opentelemetry callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496). -- opentelemetry callback plugin - transformed args into a list of span attributes; in addition, redacted username and password from any URLs (https://github.com/ansible-collections/community.general/pull/3564). -- openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284). -- opkg - allow ``name`` to be a YAML list of strings (https://github.com/ansible-collections/community.general/issues/572, https://github.com/ansible-collections/community.general/pull/3554). -- pacman - add ``executable`` option to use an alternative pacman binary (https://github.com/ansible-collections/community.general/issues/2524). -- pacman - speed up checking if the package is installed, when the latest version check is not needed (https://github.com/ansible-collections/community.general/pull/3606). -- pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285). -- passwordstore lookup - add option ``missing`` to choose what to do if the password file is missing (https://github.com/ansible-collections/community.general/pull/2500). -- pids - refactor to add support for older ``psutil`` versions to the ``pattern`` option (https://github.com/ansible-collections/community.general/pull/3315). -- pipx - minor refactor on the ``changed`` logic (https://github.com/ansible-collections/community.general/pull/3647). -- pkgin - in case of ``pkgin`` tool failure, display returned standard output ``stdout`` and standard error ``stderr`` to ease debugging (https://github.com/ansible-collections/community.general/issues/3146). -- pkgng - ``annotation`` can now also be a YAML list (https://github.com/ansible-collections/community.general/pull/3526). -- pkgng - packages being installed (or upgraded) are acted on in one command (per action) (https://github.com/ansible-collections/community.general/issues/2265). -- pkgng - status message specifies number of packages installed and/or upgraded separately. 
Previously, all changes were reported as one count of packages "added" (https://github.com/ansible-collections/community.general/pull/3393). -- proxmox inventory plugin - added snapshots to host facts (https://github.com/ansible-collections/community.general/pull/3044). -- proxmox_group_info - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). -- proxmox_kvm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). -- qubes connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). -- rax_mon_notification_plan - fixed validation checks by specifying type ``str`` as the ``elements`` of parameters ``ok_state``, ``warning_state`` and ``critical_state`` (https://github.com/ansible-collections/community.general/pull/2955). -- redfish_command - add ``boot_override_mode`` argument to BootSourceOverride commands (https://github.com/ansible-collections/community.general/issues/3134). -- redfish_command, redfish_config and redfish_utils module utils - add parameter to strip etag of quotes before patch, since some vendors do not properly handle the ``If-Match`` etag when it is quoted (https://github.com/ansible-collections/community.general/pull/3296). -- redfish_config - modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). -- redfish_info - include ``Status`` property for Thermal objects when querying Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232). -- redfish_utils module utils - modified set_bios_attributes function to skip invalid attribute instead of returning. Added skipped attributes to output (https://github.com/ansible-collections/community.general/issues/1995). -- redhat_subscription - add ``server_prefix`` and ``server_port`` parameters (https://github.com/ansible-collections/community.general/pull/2779). -- redis - allow using the term ``replica`` instead of ``slave``, which has been the official Redis terminology since 2018 (https://github.com/ansible-collections/community.general/pull/2867). -- rhevm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). -- saltstack connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). -- scaleway plugin inventory - parse scw-cli config file for ``oauth_token`` (https://github.com/ansible-collections/community.general/pull/3250). -- serverless - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). -- slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205). -- snap - added ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1990). -- snap - improved module error handling, especially for the case when snap server is down (https://github.com/ansible-collections/community.general/issues/2970). -- splunk callback plugin - add ``batch`` option for user-configurable correlation IDs (https://github.com/ansible-collections/community.general/issues/2790). -- spotinst_aws_elastigroup - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2355). -- ssh_config - new feature to set ``ForwardAgent`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/issues/2473). 
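A minimal sketch of the new ``ForwardAgent`` support in ``ssh_config`` above, assuming the option is exposed as a ``forward_agent`` module parameter; host and user are placeholders::

  - name: Enable agent forwarding for a bastion host
    community.general.ssh_config:
      user: deploy
      host: bastion.example.com
      forward_agent: true  # assumed parameter name mapping to ForwardAgent
      state: present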
-- stacki_host - minor refactoring (https://github.com/ansible-collections/community.general/pull/2681). -- supervisorctl - add the possibility to restart all programs and program groups (https://github.com/ansible-collections/community.general/issues/3551). -- supervisorctl - using standard Ansible mechanism to validate ``signalled`` state required parameter (https://github.com/ansible-collections/community.general/pull/3068). -- terraform - add ``check_destroy`` optional parameter to check for deletion of resources before it is applied (https://github.com/ansible-collections/community.general/pull/2874). -- terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540). -- terraform - add option ``overwrite_init`` to skip init if exists (https://github.com/ansible-collections/community.general/pull/2573). -- terraform - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). -- timezone - print error message to debug instead of warning when timedatectl fails (https://github.com/ansible-collections/community.general/issues/1942). -- tss lookup plugin - added ``token`` parameter for token authorization; ``username`` and ``password`` are optional when ``token`` is provided (https://github.com/ansible-collections/community.general/pull/3327). -- tss lookup plugin - added new parameter for domain authorization (https://github.com/ansible-collections/community.general/pull/3228). -- tss lookup plugin - refactored to decouple the supporting third-party library (``python-tss-sdk``) (https://github.com/ansible-collections/community.general/pull/3252). -- ufw - if ``delete=true`` and ``insert`` option is present, then ``insert`` is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514). -- vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191). -- zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502). -- zfs_delegate_admin - drop choices from permissions, allowing any permission supported by the underlying zfs commands (https://github.com/ansible-collections/community.general/pull/2540). -- zone connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). -- zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332). -- zypper - prefix zypper commands with ``/sbin/transactional-update --continue --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159). - -Breaking Changes / Porting Guide --------------------------------- - -- archive - adding idempotency checks for changes to file names and content within the ``destination`` file (https://github.com/ansible-collections/community.general/pull/3075). -- lxd inventory plugin - when used with Python 2, the plugin now needs ``ipaddress`` installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441). -- scaleway_security_group_rule - when used with Python 2, the module now needs ``ipaddress`` installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441). 
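For hosts still running Python 2, the ``ipaddress`` backport required by the two entries above can be installed up front; a minimal sketch using ``ansible.builtin.pip``::

  - name: Ensure the ipaddress backport is present on Python 2 hosts
    ansible.builtin.pip:
      name: ipaddress
    when: ansible_python.version.major == 2  # only needed for Python 2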
- -Deprecated Features ------------------- -- ali_instance_info - marked removal version of deprecated parameters ``availability_zone`` and ``instance_names`` (https://github.com/ansible-collections/community.general/issues/2429). -- bitbucket_* modules - ``username`` options have been deprecated in favor of ``workspace`` and will be removed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/2045). -- dnsimple - python-dnsimple < 2.0.0 is deprecated and support for it will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2946#discussion_r667624693). -- gitlab_group_members - setting ``gitlab_group`` to ``name`` or ``path`` is deprecated. Use ``full_path`` instead (https://github.com/ansible-collections/community.general/pull/3451). -- keycloak_authentication - the return value ``flow`` is now deprecated and will be removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280). -- keycloak_group - the return value ``group`` is now deprecated and will be removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280). -- linode - parameter ``backupsenabled`` is deprecated and will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2410). -- lxd_container - the current default value ``true`` of ``ignore_volatile_options`` is deprecated and will change to ``false`` in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/3429). -- serverless - deprecating parameter ``functions`` because it was not used in the code (https://github.com/ansible-collections/community.general/pull/2845). -- xfconf - deprecate the ``get`` state. The new module ``xfconf_info`` should be used instead (https://github.com/ansible-collections/community.general/pull/3049). - -Removed Features (previously deprecated) ---------------------------------------- -- All inventory and vault scripts contained in community.general were moved to the `contrib-scripts GitHub repository `_ (https://github.com/ansible-collections/community.general/pull/2696). -- ModuleHelper module utils - remove fallback when value could not be determined for a parameter (https://github.com/ansible-collections/community.general/pull/3461). -- Removed deprecated netapp module utils and doc fragments (https://github.com/ansible-collections/community.general/pull/3197). -- The nios, nios_next_ip, nios_next_network lookup plugins, the nios documentation fragment, and the nios_host_record, nios_ptr_record, nios_mx_record, nios_fixed_address, nios_zone, nios_member, nios_a_record, nios_aaaa_record, nios_network, nios_dns_view, nios_txt_record, nios_naptr_record, nios_srv_record, nios_cname_record, nios_nsgroup, and nios_network_view modules have been removed from community.general 4.0.0 and were replaced by redirects to the `infoblox.nios_modules `_ collection. Please install the ``infoblox.nios_modules`` collection to continue using these plugins and modules, and update your FQCNs (https://github.com/ansible-collections/community.general/pull/3592). -- The vendored copy of ``ipaddress`` has been removed. Please use ``ipaddress`` from the Python 3 standard library, or `from pypi `_. (https://github.com/ansible-collections/community.general/pull/2441). -- cpanm - removed the deprecated ``system_lib`` option. 
Use Ansible's privilege escalation mechanism instead; the option basically used ``sudo`` (https://github.com/ansible-collections/community.general/pull/3461). -- grove - removed the deprecated alias ``message`` of the ``message_content`` option (https://github.com/ansible-collections/community.general/pull/3461). -- proxmox - default value of ``proxmox_default_behavior`` changed to ``no_defaults`` (https://github.com/ansible-collections/community.general/pull/3461). -- proxmox_kvm - default value of ``proxmox_default_behavior`` changed to ``no_defaults`` (https://github.com/ansible-collections/community.general/pull/3461). -- runit - removed the deprecated ``dist`` option which was not used by the module (https://github.com/ansible-collections/community.general/pull/3461). -- telegram - removed the deprecated ``msg``, ``msg_format`` and ``chat_id`` options (https://github.com/ansible-collections/community.general/pull/3461). -- xfconf - the default value of ``disable_facts`` changed to ``true``, and the value ``false`` is no longer allowed. Register the module results instead (https://github.com/ansible-collections/community.general/pull/3461). - -Security Fixes --------------- - -- nmcli - do not pass WiFi secrets on the ``nmcli`` command line. Use ``nmcli con edit`` instead and pass secrets as ``stdin`` (https://github.com/ansible-collections/community.general/issues/3145). - -Bugfixes --------- - -- _mount module utils - fixed the sanity checks (https://github.com/ansible-collections/community.general/pull/2883). -- ali_instance_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- ansible_galaxy_install - the output value ``cmd_args`` was bringing the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655). -- apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message when not found (https://github.com/ansible-collections/community.general/issues/3253). -- archive - fixed ``exclude_path`` values causing incorrect archive root (https://github.com/ansible-collections/community.general/pull/2816). -- archive - fixed improper file names for single file zip archives (https://github.com/ansible-collections/community.general/issues/2818). -- archive - fixed incorrect ``state`` result value documentation (https://github.com/ansible-collections/community.general/pull/2816). -- archive - fixed task failure when using the ``remove`` option with a ``path`` containing nested files for ``format``s other than ``zip`` (https://github.com/ansible-collections/community.general/issues/2919). -- archive - fixing archive root determination when longest common root is ``/`` (https://github.com/ansible-collections/community.general/pull/3036). -- composer - use ``no-interaction`` option when discovering available options to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348). -- consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495). -- consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter`` and ``token`` as keyword arguments (https://github.com/ansible-collections/community.general/issues/2124). 
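The ``consul_kv`` keyword arguments mentioned above can be passed directly in the lookup call; a minimal sketch in which the key, datacenter and token are placeholders::

  - name: Read a key recursively from a specific datacenter
    ansible.builtin.debug:
      msg: "{{ lookup('community.general.consul_kv', 'config/app', recurse=true, datacenter='dc1', token='anonymous') }}"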
-- copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-`` (for example ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084, https://github.com/ansible-collections/community.general/pull/3237). -- cpanm - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). -- deploy_helper - improved parameter checking by using standard Ansible construct (https://github.com/ansible-collections/community.general/pull/3104). -- django_manage - argument ``command`` is being split again, as it should be (https://github.com/ansible-collections/community.general/issues/3215). -- django_manage - parameters ``apps`` and ``fixtures`` are now split instead of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333). -- django_manage - refactor to call ``run_command()`` passing command as a list instead of string (https://github.com/ansible-collections/community.general/pull/3098). -- ejabberd_user - replaced in-code check with ``required_if``, using ``get_bin_path()`` for the command, passing args to ``run_command()`` as list instead of string (https://github.com/ansible-collections/community.general/pull/3093). -- filesystem - repair ``reiserfs`` fstype support after adding it to integration tests (https://github.com/ansible-collections/community.general/pull/2472). -- gitlab_deploy_key - fix idempotency on projects with multiple deploy keys (https://github.com/ansible-collections/community.general/pull/3473). -- gitlab_deploy_key - fix the SSH Deploy Key being deleted accidentally while running task in check mode (https://github.com/ansible-collections/community.general/issues/3621, https://github.com/ansible-collections/community.general/pull/3622). -- gitlab_group - avoid passing wrong value for ``require_two_factor_authentication`` on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453). -- gitlab_group_members - ``get_group_id`` returns the group ID by matching ``full_path``, ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400). -- gitlab_group_members - fixes issue when gitlab group has more than 20 members (pagination problem) (https://github.com/ansible-collections/community.general/issues/3041). -- gitlab_project - user projects are created using namespace ID now, instead of user ID (https://github.com/ansible-collections/community.general/pull/2881). -- gitlab_project_members - ``get_project_id`` returns the project ID by matching ``full_path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3602). -- gitlab_project_members - fixes issue when gitlab group has more than 20 members (pagination problem) (https://github.com/ansible-collections/community.general/issues/3041). -- idrac_redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing (https://github.com/ansible-collections/community.general/pull/2385). -- influxdb_user - allow creation of admin users when InfluxDB authentication is enabled but no other user exists on the database. In this scenario, InfluxDB 1.x allows only ``CREATE USER`` queries and rejects any other query (https://github.com/ansible-collections/community.general/issues/2364). 
-- influxdb_user - fix bug where an influxdb user has no privileges for 2 or more databases (https://github.com/ansible-collections/community.general/pull/2499). -- influxdb_user - fix bug which removed current privileges instead of appending them to existing ones (https://github.com/ansible-collections/community.general/issues/2609, https://github.com/ansible-collections/community.general/pull/2614). -- ini_file - fix Unicode processing for Python 2 (https://github.com/ansible-collections/community.general/pull/2875). -- ini_file - fix inconsistency between empty value and no value (https://github.com/ansible-collections/community.general/issues/3031). -- interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328). -- inventory and vault scripts - change file permissions to make vendored inventory and vault scripts executable (https://github.com/ansible-collections/community.general/pull/2337). -- ipa_* modules - fix environment fallback for ``ipa_host`` option (https://github.com/ansible-collections/community.general/issues/3560). -- ipa_sudorule - call ``sudorule_add_allow_command`` method instead of ``sudorule_add_allow_command_group`` (https://github.com/ansible-collections/community.general/issues/2442). -- iptables_state - call ``async_status`` action plugin rather than its module (https://github.com/ansible-collections/community.general/issues/2700). -- iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean up (https://github.com/ansible-collections/community.general/pull/2525). -- iptables_state - fix a broken query of ``async_status`` result with current ansible-core development version (https://github.com/ansible-collections/community.general/issues/2627, https://github.com/ansible-collections/community.general/pull/2671). -- iptables_state - fix initialization of iptables from null state when addressing more than one table (https://github.com/ansible-collections/community.general/issues/2523). -- java_cert - fix issue with incorrect alias used on PKCS#12 certificate import (https://github.com/ansible-collections/community.general/pull/2560). -- java_cert - import private key as well as public certificate from PKCS#12 (https://github.com/ansible-collections/community.general/issues/2460). -- java_keystore - add parameter ``keystore_type`` to control output file format and override ``keytool``'s default, which depends on Java version (https://github.com/ansible-collections/community.general/issues/2515). -- jboss - fix the deployment file permission issue when the JBoss server is running under a non-root user. The deployment file is copied with file content only. The file permission is set to ``440`` and belongs to the root user. When the JBoss ``WildFly`` server is running under a non-root user, it is unable to read the deployment file (https://github.com/ansible-collections/community.general/pull/3426). -- jenkins_build - examine presence of ``build_number`` before deleting a jenkins build (https://github.com/ansible-collections/community.general/pull/2850). -- jenkins_plugin - use POST method for sending request to jenkins API when ``state`` option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent`` (https://github.com/ansible-collections/community.general/issues/2510). -- json_query filter plugin - avoid 'unknown type' errors for more Ansible internal types (https://github.com/ansible-collections/community.general/pull/2607). 
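As a reminder of how the ``json_query`` filter mentioned above is typically used, a minimal sketch with an illustrative data structure (requires ``jmespath`` on the controller)::

  - name: Extract all server names from a nested structure
    ansible.builtin.debug:
      msg: "{{ cluster | community.general.json_query('servers[*].name') }}"
    vars:
      cluster:
        servers:
          - name: web01
          - name: web02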
-- keycloak_authentication - fix bug when two identical executions are in the same authentication flow (https://github.com/ansible-collections/community.general/pull/2904). -- keycloak_authentication - fix bug where the requirement was always ``DISABLED`` when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330). -- keycloak_client - update the check mode to not show differences resulting from sorting and default values relating to the properties ``redirectUris``, ``attributes``, and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/3610). -- keycloak_identity_provider - fix change detection when updating identity provider mappers (https://github.com/ansible-collections/community.general/pull/3538, https://github.com/ansible-collections/community.general/issues/3537). -- keycloak_realm - ``ssl_required`` changed from a boolean type to accept the strings ``none``, ``external`` or ``all``. This is not a breaking change since the module always failed when a boolean was supplied (https://github.com/ansible-collections/community.general/pull/2693). -- keycloak_realm - element type for ``events_listeners`` parameter should be ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231). -- keycloak_realm - remove warning that ``reset_password_allowed`` needs to be marked as ``no_log`` (https://github.com/ansible-collections/community.general/pull/2694). -- keycloak_role - quote role name when used in URL path to avoid errors when role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535, https://github.com/ansible-collections/community.general/pull/3536). -- launchd - fixed sanity check in the module's code (https://github.com/ansible-collections/community.general/pull/2960). -- launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337). -- linode_v4 - changed the error message to point to the correct bugtracker URL (https://github.com/ansible-collections/community.general/pull/2430). -- logdna callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692). -- lvol - fixed rounding errors (https://github.com/ansible-collections/community.general/issues/2370). -- lvol - fixed size unit capitalization to match units used between different tools for comparison (https://github.com/ansible-collections/community.general/issues/2360). -- lvol - honor ``check_mode`` on thinpool (https://github.com/ansible-collections/community.general/issues/2934). -- macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499). -- maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). 
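The ``ssl_required`` change above means ``keycloak_realm`` now expects one of the documented strings; a minimal sketch in which the server URL and credentials are placeholders::

  - name: Require SSL for external requests on a realm
    community.general.keycloak_realm:
      auth_keycloak_url: https://keycloak.example.com/auth
      auth_realm: master
      auth_username: admin
      auth_password: secret
      realm: myrealm
      ssl_required: external  # one of none, external, all
      state: present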
-- memset_memstore_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- memset_server_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- modprobe - added additional checks to ensure module load/unload is effective (https://github.com/ansible-collections/community.general/issues/1608). -- module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). -- module_helper module utils - avoid failing when non-zero ``rc`` is present on regular exit (https://github.com/ansible-collections/community.general/pull/2912). -- module_helper module utils - fixed change-tracking for dictionaries and lists (https://github.com/ansible-collections/community.general/pull/2951). -- netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590). -- nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512). -- nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels (https://github.com/ansible-collections/community.general/issues/3238, https://github.com/ansible-collections/community.general/pull/3239). -- nmcli - compare MAC addresses case insensitively to fix idempotency issue (https://github.com/ansible-collections/community.general/issues/2409). -- nmcli - fixed ``dns6`` option handling so that it is treated as a list internally (https://github.com/ansible-collections/community.general/pull/3563). -- nmcli - fixed ``ipv4.route-metric`` being in properties of type list (https://github.com/ansible-collections/community.general/pull/3563). -- nmcli - fixes team-slave configuration by adding connection.slave-type (https://github.com/ansible-collections/community.general/issues/766). -- nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli`` command (https://github.com/ansible-collections/community.general/issues/2408). -- npm - correctly handle cases where a dependency does not have a ``version`` property because it is either missing or invalid (https://github.com/ansible-collections/community.general/issues/2917). -- npm - when the ``version`` option is used the comparison of installed vs missing will use name@version instead of just name, allowing version specific updates (https://github.com/ansible-collections/community.general/issues/2021). -- one_image - fix error message when renaming an image (https://github.com/ansible-collections/community.general/pull/3626). -- one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435). -- oneview_datacenter_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- oneview_enclosure_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- oneview_ethernet_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). 
-- oneview_fc_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- oneview_fcoe_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- oneview_logical_interconnect_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- oneview_network_set_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- oneview_san_manager_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- open_iscsi - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3286). -- openbsd_pkg - fix crash from ``KeyError`` exception when package installs, but ``pkg_add`` returns with a non-zero exit code (https://github.com/ansible-collections/community.general/pull/3336). -- openbsd_pkg - fix regexp matching crash. This bug could trigger on package names with special characters, for example ``g++`` (https://github.com/ansible-collections/community.general/pull/3161). -- opentelemetry callback plugin - validated the task result exception without crashing. Also simplified the code a bit (https://github.com/ansible-collections/community.general/pull/3450, https://github.com/ansible/ansible/issues/75726). -- openwrt_init - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3284). -- ovirt4 inventory script - improve configparser creation to avoid crashes for options without values (https://github.com/ansible-collections/community.general/issues/674). -- packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- pacman - fix changed status when ignorepkg has been defined (https://github.com/ansible-collections/community.general/issues/1758). -- pamd - code for ``state=updated`` when dealing with the pam module arguments, made no distinction between ``None`` and an empty list (https://github.com/ansible-collections/community.general/issues/3260). -- pamd - fixed problem with files containing only one or two lines (https://github.com/ansible-collections/community.general/issues/2925). -- pids - avoid crashes for older ``psutil`` versions, like on RHEL6 and RHEL7 (https://github.com/ansible-collections/community.general/pull/2808). -- pipx - ``state=inject`` was failing to parse the list of injected packages (https://github.com/ansible-collections/community.general/pull/3611). -- pipx - set environment variable ``USE_EMOJI=0`` to prevent errors on platforms that do not support ``UTF-8`` (https://github.com/ansible-collections/community.general/pull/3611). -- pipx - the output value ``cmd_args`` was bringing the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655). 
-- pkgin - Fix exception encountered when all packages are already installed (https://github.com/ansible-collections/community.general/pull/3583). -- pkgng - ``name=* state=latest`` check for upgrades did not count "Number of packages to be reinstalled" as a `changed` action, giving incorrect results in both regular and check mode (https://github.com/ansible-collections/community.general/pull/3526). -- pkgng - an `earlier PR `_ broke check mode so that the module always reports `not changed`. This is now fixed so that the module reports number of upgrade or install actions that would be performed (https://github.com/ansible-collections/community.general/pull/3526). -- pkgng - the ``annotation`` functionality was broken and is now fixed, and now also works with check mode (https://github.com/ansible-collections/community.general/pull/3526). -- proxmox inventory plugin - fixed parsing failures when some cluster nodes are offline (https://github.com/ansible-collections/community.general/issues/2931). -- proxmox inventory plugin - fixed plugin failure when a ``qemu`` guest has no ``template`` key (https://github.com/ansible-collections/community.general/pull/3052). -- proxmox_group_info - fix module crash if a ``group`` parameter is used (https://github.com/ansible-collections/community.general/pull/3649). -- proxmox_kvm - clone operation should return the VMID of the target VM and not that of the source VM. This was failing when the target VM with the chosen name already existed (https://github.com/ansible-collections/community.general/pull/3266). -- proxmox_kvm - fix parsing of Proxmox VM information with device info not containing a comma, like disks backed by ZFS zvols (https://github.com/ansible-collections/community.general/issues/2840). -- proxmox_kvm - fix result of clone, now returns ``newid`` instead of ``vmid`` (https://github.com/ansible-collections/community.general/pull/3034). -- proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists (https://github.com/ansible-collections/community.general/issues/2648). -- puppet - replace ``console`` with ``stdout`` in ``logdest`` option when ``all`` has been chosen (https://github.com/ansible-collections/community.general/issues/1190). -- rax_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- redfish_command - fix extraneous error caused by missing ``bootdevice`` argument when using the ``DisableBootOverride`` sub-command (https://github.com/ansible-collections/community.general/issues/3005). -- redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- redfish_utils module utils - do not attempt to change the boot source override mode if not specified by the user (https://github.com/ansible-collections/community.general/issues/3509/). -- redfish_utils module utils - if a manager network property is not specified in the service, attempt to change the requested settings (https://github.com/ansible-collections/community.general/issues/3404/). -- redfish_utils module utils - if given, add account ID of user that should be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/). -- redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497). -- rhsm_release - fix the issue that module considers 8, 7Client and 7Workstation as invalid releases (https://github.com/ansible-collections/community.general/pull/2571). 
-- saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194). -- scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). -- scaleway plugin inventory - fix ``JSON object must be str, not 'bytes'`` with Python 3.5 (https://github.com/ansible-collections/community.general/issues/2769). -- smartos_image_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- snap - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). -- snap - fix formatting of ``--channel`` argument when the ``channel`` option is used (https://github.com/ansible-collections/community.general/pull/3028). -- snap - fix various bugs which prevented the module from working at all, and which resulted in ``state=absent`` fail on absent snaps (https://github.com/ansible-collections/community.general/issues/2835, https://github.com/ansible-collections/community.general/issues/2906, https://github.com/ansible-collections/community.general/pull/2912). -- snap - fixed the order of the ``--classic`` parameter in the command line invocation (https://github.com/ansible-collections/community.general/issues/2916). -- snap_alias - the output value ``cmd_args`` was bringing the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655). -- snmp_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/). -- stacki_host - when adding a new server, ``rack`` and ``rank`` must be passed, and network parameters are optional (https://github.com/ansible-collections/community.general/pull/2681). -- stackpath_compute inventory script - fix broken validation checks for client ID and client secret (https://github.com/ansible-collections/community.general/pull/2448). -- supervisorctl - state ``signalled`` was not working (https://github.com/ansible-collections/community.general/pull/3068). -- svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with Python 3 (https://github.com/ansible-collections/community.general/issues/2373). -- taiga - some constructs in the module fixed to work also in Python 3 (https://github.com/ansible-collections/community.general/pull/3067). -- terraform - ensure the workspace is set back to its previous value when the apply fails (https://github.com/ansible-collections/community.general/pull/2634). -- tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk`` version <=0.0.5 (https://github.com/ansible-collections/community.general/issues/3192, https://github.com/ansible-collections/community.general/pull/3199). -- tss lookup plugin - fixed incompatibility with ``python-tss-sdk`` version 1.0.0 (https://github.com/ansible-collections/community.general/issues/3057, https://github.com/ansible-collections/community.general/pull/3139). -- udm_dns_record - fixed managing of PTR records, which can never have worked before (https://github.com/ansible-collections/community.general/pull/3256). -- ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). 
-- utm_aaa_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- utm_ca_host_key_cert_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- utm_network_interface_address_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- utm_proxy_frontend_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- utm_proxy_location_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- vdo - boolean arguments now compared with proper ``true`` and ``false`` values instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191). -- xenserver_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715). -- xfconf_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). -- yaml callback plugin - avoid modifying PyYAML so that other plugins using it on the controller, like the ``to_yaml`` filter, do not produce different output (https://github.com/ansible-collections/community.general/issues/3471, https://github.com/ansible-collections/community.general/pull/3478). -- yum_versionlock - fix idempotency when using wildcard (asterisk) in ``name`` option (https://github.com/ansible-collections/community.general/issues/2761). -- zfs - certain ZFS properties, especially sizes, would lead to a task being falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975, https://github.com/ansible-collections/community.general/pull/2454). -- zfs - treated received properties as local (https://github.com/ansible-collections/community.general/pull/502). -- zypper_repository - fix idempotency on adding repository with ``$releasever`` and ``$basearch`` variables (https://github.com/ansible-collections/community.general/issues/1985). -- zypper_repository - when a URL to a .repo file was provided in option ``repo=`` and ``state=present``, only the first run was successful; future runs failed due to missing checks prior to starting zypper. Usage of ``state=absent`` in combination with a .repo file was not working either (https://github.com/ansible-collections/community.general/issues/1791, https://github.com/ansible-collections/community.general/issues/3466). 
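Closing out the bugfixes, the ``zypper_repository`` idempotency fix above concerns repositories defined with zypper variables; a minimal sketch in which the repository name and URL are placeholders::

  - name: Add a repository whose URL uses zypper variables
    community.general.zypper_repository:
      name: my-repo
      repo: 'http://download.example.com/repositories/$releasever/$basearch/'
      state: present
      auto_import_keys: true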
- -New Plugins ------------ - -Callback -~~~~~~~~ - -- elastic - Create distributed traces for each Ansible task in Elastic APM -- opentelemetry - Create distributed traces with OpenTelemetry - -Filter -~~~~~~ - -- groupby_as_dict - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute -- unicode_normalize - Normalizes unicode strings to facilitate comparison of characters with normalized forms - -Inventory -~~~~~~~~~ - -- icinga2 - Icinga2 inventory source -- opennebula - OpenNebula inventory source - -Lookup -~~~~~~ - -- collection_version - Retrieves the version of an installed collection -- dependent - Composes a list with nested elements of other lists or dicts which can depend on previous loop variables -- random_pet - Generates random pet names -- random_string - Generates random string -- random_words - Return a number of random words - -Test -~~~~ - -- a_module - Check whether the given string refers to an available module or action plugin - -New Modules ------------ - -Cloud -~~~~~ - -misc -^^^^ - -- proxmox_nic - Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster. -- proxmox_tasks_info - Retrieve information about one or more Proxmox VE tasks - -Database -~~~~~~~~ - -misc -^^^^ - -- redis_data - Set key value pairs in Redis -- redis_data_incr - Increment keys in Redis -- redis_data_info - Get value of key in Redis database - -mssql -^^^^^ - -- mssql_script - Execute SQL scripts on a MSSQL database - -saphana -^^^^^^^ - -- hana_query - Execute SQL on HANA - -Files -~~~~~ - -- sapcar_extract - Manages SAP SAPCAR archives - -Identity -~~~~~~~~ - -keycloak -^^^^^^^^ - -- keycloak_authentication - Configure authentication in Keycloak -- keycloak_client_rolemapping - Allows administration of Keycloak client_rolemapping with the Keycloak API -- keycloak_clientscope - Allows administration of Keycloak client_scopes via Keycloak API -- keycloak_identity_provider - Allows administration of Keycloak identity providers via Keycloak API -- keycloak_role - Allows administration of Keycloak roles via Keycloak API -- keycloak_user_federation - Allows administration of Keycloak user federations via Keycloak API - -Notification -~~~~~~~~~~~~ - -- discord - Send Discord messages - -Packaging -~~~~~~~~~ - -language -^^^^^^^^ - -- ansible_galaxy_install - Install Ansible roles or collections using ansible-galaxy -- pipx - Manages applications installed with pipx - -os -^^ - -- dnf_versionlock - Locks package versions in C(dnf) based systems -- pacman_key - Manage pacman's list of trusted keys -- snap_alias - Manages snap aliases - -Source Control -~~~~~~~~~~~~~~ - -gitlab -^^^^^^ - -- gitlab_protected_branch - (un)Marking existing branches for protection - -System -~~~~~~ - -- sap_task_list_execute - Perform SAP Task list execution -- xfconf_info - Retrieve XFCE4 configurations - -Web Infrastructure -~~~~~~~~~~~~~~~~~~ - -- rundeck_job_executions_info - Query executions for a Rundeck job -- rundeck_job_run - Run a Rundeck job diff --git a/ansible_collections/community/general/FILES.json b/ansible_collections/community/general/FILES.json deleted file mode 100644 index 3f038dbf..00000000 --- a/ansible_collections/community/general/FILES.json +++ /dev/null @@ -1,25261 +0,0 @@ -{ - "files": [ - { - "name": ".", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": ".azure-pipelines", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
".azure-pipelines/scripts", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": ".azure-pipelines/scripts/aggregate-coverage.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "432bb55a22ee5b1b7aeb57eb6474c9ac0eb70db442456616205453d01584392c", - "format": 1 - }, - { - "name": ".azure-pipelines/scripts/combine-coverage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e34d4e863a65b9f53c4ca8ae37655858969898a949e050e9cb3cb0d5f02342d0", - "format": 1 - }, - { - "name": ".azure-pipelines/scripts/process-results.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c03d7273fe58882a439b6723e92ab89f1e127772b5ce35aa67c546dd62659741", - "format": 1 - }, - { - "name": ".azure-pipelines/scripts/publish-codecov.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d690f98e8db0d0020dbadb4d7012bf9e27c7b37bd91e3d7bce3f17d1b69b335d", - "format": 1 - }, - { - "name": ".azure-pipelines/scripts/report-coverage.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0134b8f21933faca559c36c7551eb9f7aca849a09fa575ff16627c33bd317c42", - "format": 1 - }, - { - "name": ".azure-pipelines/scripts/run-tests.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb08a3ec5715b00d476ae6d63ca22e11a9ad8887239439937d2a7ea342e5a623", - "format": 1 - }, - { - "name": ".azure-pipelines/scripts/time-command.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0232f415efeb583ddff907c058986963b775441eaf129d7162aee0acb0d36834", - "format": 1 - }, - { - "name": ".azure-pipelines/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": ".azure-pipelines/templates/coverage.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "270c97c0b91869f4bf2ff350d9b703382d2032c1a8321e5142e75085409c87de", - "format": 1 - }, - { - "name": ".azure-pipelines/templates/matrix.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4fb0d3ffb2125d5806c7597e4f9d4b2af69cf8c337e9d57803081eddd4a6b081", - "format": 1 - }, - { - "name": ".azure-pipelines/templates/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2cfa1271f94c71f05ffa0b1f763d8946394b5636e14579cda8ee14bb38bbcf1c", - "format": 1 - }, - { - "name": ".azure-pipelines/README.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "61f20decd3c8fb34ac2cc6ff79f598fc5136e642130a7ba065ccc5aa37960cd2", - "format": 1 - }, - { - "name": ".azure-pipelines/azure-pipelines.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f326a40059db446b4d5ded0aa25a449b1ca889f96cd2084c2672afb2b5a5cdce", - "format": 1 - }, - { - "name": ".github", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": ".github/ISSUE_TEMPLATE", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": ".github/ISSUE_TEMPLATE/bug_report.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8063576d1a2b7efbc22829be27042dc843eb6f3b1c1862663823aeff9c7071bb", - "format": 1 - }, - { - "name": ".github/ISSUE_TEMPLATE/config.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2e5f08c57601d637ec507daec616f993993d16f51892ca62214932b4fad0dcd9", - "format": 1 - }, - { - "name": ".github/ISSUE_TEMPLATE/documentation_report.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"085b4f03c46b46d5e370727e0c1561c24c4e1a3f625a1cf436a7e3d5649f686d", - "format": 1 - }, - { - "name": ".github/ISSUE_TEMPLATE/feature_request.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c6b3fc179291972b5ed5b9a1f7d66db88ea95fd24ae4f84500fc253dd4f6e5ba", - "format": 1 - }, - { - "name": ".github/workflows", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": ".github/workflows/codeql-analysis.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b03191ab8e81273883b6d5eb8ac4ff0a216cd2e3a11f46c9c15553ff9f0c5fcd", - "format": 1 - }, - { - "name": ".github/BOTMETA.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f33ce93f8b6b7dc02243acfb94cb69b637dbe8ed7967032bfa277c4e4f61e106", - "format": 1 - }, - { - "name": ".github/dependabot.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4db28cbf4c9935cd6c08296f484f95441fcc58321213715df8477b63ba53f4cd", - "format": 1 - }, - { - "name": ".github/patchback.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f28653c2f8d2965a55f76092049c4205a9c7f828e4edbd1cd089f7dd2685f93a", - "format": 1 - }, - { - "name": ".github/settings.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e0381b42e525395bcf6c9e21de33e23ca8cace574b8ef85902a36ce606d3a991", - "format": 1 - }, - { - "name": "changelogs", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "changelogs/fragments", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "changelogs/fragments/.keep", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "changelogs/.gitignore", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "919ef00776e7d2ff349950ac4b806132aa9faf006e214d5285de54533e443b33", - "format": 1 - }, - { - "name": "changelogs/changelog.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fc99147da9dd277a5e0718ac311034e1f0d7841a2ddc1fdf48d3d2b14738fcad", - "format": 1 - }, - { - "name": "changelogs/config.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a31925d687298c2d9568cd8a6083c015024ba3560d3275fd9ef0b1e9e8f6b378", - "format": 1 - }, - { - "name": "docs", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-001_vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b59292184c20294d3e7608c6b970358f669d617f432f4d2ff7969a05a8560d75", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "124727db3b24565eacc8316782db1ad61d34c66973f84aa4367fc6e8374b7593", - "format": 1 - }, - { - "name": 
"docs/docsite/helper/lists_mergeby/example-002_vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b59292184c20294d3e7608c6b970358f669d617f432f4d2ff7969a05a8560d75", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f8cdc658841376d10f5755fa855f6462fa50a9d84c2272393e554a786e2a0fd", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-003_vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "372fa863d5e78b00aeee2f74cd394f69eeb41234667315c5b23226cf8adb2ca5", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-004_vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3c8afda6cf2d26ae54e12b82771f759347d290c3da1a1f1d161bead215ccdb58", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-005_vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2cd75720f897d6484c8cf128c3a4cee776b2ba7bfda588c377b42ea2df77769f", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-006_vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d6aabc04b58d2a62e22e8ba73a5ad9bc54115f1b5567c9d6fd3627f2b78f4f1e", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-007_vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5", - "format": 1 - }, - { - "name": 
"docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a5a6c28ca3d0b57d1dcf91c93421b1de878b57cc74db5328080e2b89710416f5", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-008_vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d1153c8f98613b91afc3ceed3f67d37437ccb3bff135ceb753c0a18e0b3725b0", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/default-common.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b59292184c20294d3e7608c6b970358f669d617f432f4d2ff7969a05a8560d75", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/default-recursive-true.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-001.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef524048f5428f3b554e450b33d52eba16992be3bea78f478ab2ed145658b143", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-002.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "58ffe71c339fb197ff96e2310dfdd5382817f27168550881f2e899a74cdff45e", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-003.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ff4f9a245a38cc2604c0918066666bb9ee70f1ffaab7dd024605c865a8a4712", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-004.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "32a3d7c2172e552a9242406b89ce18069e3402326d433774da71eafccb3b6a7d", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-005.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "841054e97bab645c017ad516606400d7f38b93e99b1b2de512c9184ce55c5aeb", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-006.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e770a5fefc343eee1eeeb1d196dc83c65ad980ca560bd6931ed8155f1be72213", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-007.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4a90d6df4eb06a73e032b977783ef63fbca76639565e6ba88c72e91036fb2c74", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/example-008.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9483a524f54b54f2b85382ca5f25fb3dc404b83cd69a095632d2097590d8ce31", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/examples.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bf392d2344f6e1e286ff3f963f4cd911a7a5d64d00e5892b99f457b0aa2e5180", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/examples_all.rst.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3715738ea49e65ee4be377dd05879d368f30a2524c85f65cbc47d5c0e1f90c5", - "format": 1 - }, - { - "name": 
"docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d66e2b303f2aa06758a54e7fbf8145fcd2d3026e7a4de4e6bec1bcea72b10a6", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/list3.out.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06392cd62d44858d2ebe996653f3ef1177118e40fc49da734f92105ed17f2454", - "format": 1 - }, - { - "name": "docs/docsite/helper/lists_mergeby/playbook.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8822928d0e5ff719ed6b6f79ead4b319428244e71af2716d7f3407d938b758d4", - "format": 1 - }, - { - "name": "docs/docsite/rst", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5dd351082133b92222d674bb84a7492b3914b2fb892ac0d529c3179a20960d1e", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_abstract_informations.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e7ac0c375a05277db0cb3877a2858d320838b25179be76255ddacdfc9ebbfda6", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27eb1e21de980780e97423c7517b2f1fe3ce2f3e7feb13e8101dd6ed03b12021", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ee3b7aa7d966d6589d128753c66cfb17a71129de7a97dd5227121b4d439d9c94", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_abstract_informations_grouping.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3212e2c984c60a97aa8b7120797299433f91979a29f7d16cfc8d29e3c83279cb", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "efd8b7219cdc908d82c696bb659676911abc6df882cc8119dfc425c61e90a7b6", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_conversions.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6781fe08e818dca9601c2f5e8604ac42d23a32747e31b1a4c57af8f8f5c9c86d", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_creating_identifiers.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44f1bec70452d5e3bcf6f9e090a4c31e554ff1a4f113e82249ce880f02ed3292", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_paths.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ef986b82c012433f93e354da4e4ff014a4f6adb80d1b859435acc013f588d26", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_selecting_json_data.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4a1c451d18cd6d5e202f9328598c3432f816270bfc097956888cdf7db6da258e", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_working_with_times.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "447f2f740573115ce6e67b9c885d86cb1619f49173c23135c633aa47f6035100", - "format": 1 - }, - { - "name": "docs/docsite/rst/filter_guide_working_with_unicode.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "452dc71c1cc3073d9272179ed5670a892c54c7a32901e1c8e8e5ccca6f6c710d", - "format": 1 - }, - { - "name": 
"docs/docsite/rst/filter_guide_working_with_versions.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fd43a5e7c1176513c891f7e1969676fc0ee62f2a7f2bbec17214e4f89b569484", - "format": 1 - }, - { - "name": "docs/docsite/rst/test_guide.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8c5039a3affa314b180ab9ee304c1516bda3ea7309664d7743b205be539e90db", - "format": 1 - }, - { - "name": "docs/docsite/extra-docs.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d5ced61d7909c0f08262fbb375ced0dc6f75f8ded8c20f432001ddbf37fab47a", - "format": 1 - }, - { - "name": "meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "meta/runtime.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "55147edd582b4723e052ff22be6afde181ce16a1d0d9bc453edadbc05a4f48e5", - "format": 1 - }, - { - "name": "plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/action", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/action/iptables_state.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5e67b7ebc904eb63f86c197d95409f0483f23f805f5a0816191ed3b8546f474a", - "format": 1 - }, - { - "name": "plugins/action/shutdown.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c14173d1cb5da24064a3e0ae193caf7e27170f30c18c9d1dce6040a9529b1d9b", - "format": 1 - }, - { - "name": "plugins/action/system", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/action/system/iptables_state.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5e67b7ebc904eb63f86c197d95409f0483f23f805f5a0816191ed3b8546f474a", - "format": 1 - }, - { - "name": "plugins/action/system/shutdown.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c14173d1cb5da24064a3e0ae193caf7e27170f30c18c9d1dce6040a9529b1d9b", - "format": 1 - }, - { - "name": "plugins/become", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/become/doas.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7417aa750f35679d63d98e5a2d7c0a2a1fc999b779b00a78a22e5d92879d3b56", - "format": 1 - }, - { - "name": "plugins/become/dzdo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d6fe0252fe0993b2d11c3ee4db0ca1fef4eda6453da6658bf7c08aa39a8a47b2", - "format": 1 - }, - { - "name": "plugins/become/ksu.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9df0c91601626ebbb74475f1c264db19f5aaf6d0100d0c173f89214db480ba69", - "format": 1 - }, - { - "name": "plugins/become/machinectl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "65f5821799afbcf2492a3732734a71a508d7c138ad65820c57ea10291dd58b90", - "format": 1 - }, - { - "name": "plugins/become/pbrun.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "75a5d44918f3fc4b9b95b3dcc8f268632728c9f6d84611e3c1381dfc46b5fb68", - "format": 1 - }, - { - "name": "plugins/become/pfexec.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b467c2a91d39a4dae3de752e304e9350b6cf3b59f3133961de2375919f0cf52b", - "format": 1 - }, - { - "name": "plugins/become/pmrun.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0fcc2a009001fd94145011ac79ddb5560633edc690c8a61a0e321d9a52d65d87", - "format": 1 - }, - { - "name": 
"plugins/become/sesu.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1256af9d7fdef2e198354945d3dfa9e91b026be4d745fa9230945869d7c282e0", - "format": 1 - }, - { - "name": "plugins/become/sudosu.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "45fe5493856a68a9c849b294f69ea32527d2357978acc3829ffd64c045cc6e1f", - "format": 1 - }, - { - "name": "plugins/cache", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/cache/memcached.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06c967f9a9b9079174bee6fd175c27298c7618c0047df1dea170acab4feb2cfd", - "format": 1 - }, - { - "name": "plugins/cache/pickle.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c90e85c3e31a5389b36e499a533f1df83aa2557dd202f9defe4218edc0441d92", - "format": 1 - }, - { - "name": "plugins/cache/redis.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d834deaeb93249049ab3adca4fae0c71817b2ca5bfd5a6b65e04a82c3d8ed16a", - "format": 1 - }, - { - "name": "plugins/cache/yaml.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "384760144b4430feb9d9ab7ca90c8d3e3cd72d4cee2baf09e302f2730de116e6", - "format": 1 - }, - { - "name": "plugins/callback", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/callback/osx_say.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "55a7c838e72b1969ec94bad4afefc9da350e21e7f96cdad3f6d7e2a758c2cdbb", - "format": 1 - }, - { - "name": "plugins/callback/cgroup_memory_recap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c78c0cd15d78c87c7b24db3f64bf5d7a609d3156081923b8fe823b129e333af8", - "format": 1 - }, - { - "name": "plugins/callback/context_demo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8d537228fd64c0872560a9885d23802b8a2fbb3776c15c374dd6d6eb8d1021c4", - "format": 1 - }, - { - "name": "plugins/callback/counter_enabled.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b8f9c4ca899b057e804fde54ce1203de367e7277e631b89c907dddc71df0da27", - "format": 1 - }, - { - "name": "plugins/callback/dense.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b54b9ad005054a39bf3e97550263859b91b2e31d3663c4d5f71373b48cd7133", - "format": 1 - }, - { - "name": "plugins/callback/diy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "831aa82c12a345a06b29cf3ca6dc23c89b80603ea79a05adc03a602d8ccd7eb9", - "format": 1 - }, - { - "name": "plugins/callback/elastic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ea637f02072dd92bd942d49b5d970e3c5693a3b461f70c8fdc5872c553a9948c", - "format": 1 - }, - { - "name": "plugins/callback/hipchat.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e363830af498823fe22e96671c5f6c5c725c13f6b499aba2f3980151816d5559", - "format": 1 - }, - { - "name": "plugins/callback/jabber.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "774ea134645b688b3cbfc1a2460859420d8fc836df4ca1f989255ee5bc831891", - "format": 1 - }, - { - "name": "plugins/callback/log_plays.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89c06df364e2b945ccc5a29b4e423d40c77cd8a49e8f9df9631c12a166171202", - "format": 1 - }, - { - "name": "plugins/callback/loganalytics.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7e761d5a9a8a2f94597fa10f7e4d74dc92618d2a642181e6ac9d605c4a97c4ff", - 
"format": 1 - }, - { - "name": "plugins/callback/logdna.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2bb93c4197c2fef50aceafba9bbe69d9f6f769fe03add3e34ab19159d23c86e7", - "format": 1 - }, - { - "name": "plugins/callback/logentries.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "39947c41dda2778fe14bebd157b5fcd46e974cfe35920d5a4a21c3d04ef3d9db", - "format": 1 - }, - { - "name": "plugins/callback/logstash.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "105116672adbb9137f73822d07372373fe8666d4c716a15c4821e470ed2dadce", - "format": 1 - }, - { - "name": "plugins/callback/mail.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "886722a8b4ea343316eab7c1f8439f693ee7bc56efd5ab452a3e9222478cdc5e", - "format": 1 - }, - { - "name": "plugins/callback/nrdp.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "22f2697963c0c64eaa67d877955a6e1fac87b5d18d2ec42f75022c5051a118af", - "format": 1 - }, - { - "name": "plugins/callback/null.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0d11ed31f94e426af5c0676353ca67fc24270f7ed8f5a2f43cadf99c5f3b7b30", - "format": 1 - }, - { - "name": "plugins/callback/opentelemetry.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bd36601aee1e44919ef754413cf10a3c191b83ebd2fcbedc5cdd2ac2f4dc892b", - "format": 1 - }, - { - "name": "plugins/callback/say.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "55a7c838e72b1969ec94bad4afefc9da350e21e7f96cdad3f6d7e2a758c2cdbb", - "format": 1 - }, - { - "name": "plugins/callback/selective.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c947c8f7b9129cc4e44ae76af714fb9bf700031f8da738893dd68804592df370", - "format": 1 - }, - { - "name": "plugins/callback/slack.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "11b70ce2822efe57ebb9839a2172c09e95f3466fa0fbc100d1e4dc025feabbe3", - "format": 1 - }, - { - "name": "plugins/callback/splunk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ea1a28f51e7574c5f22dd955a2a1d37bd8d3a0381be83e27b1ab530e561f57a1", - "format": 1 - }, - { - "name": "plugins/callback/sumologic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e8d06234d12a2b4fd38510a86159ad3d9e163f97739c56581f595254f4de64d7", - "format": 1 - }, - { - "name": "plugins/callback/syslog_json.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "83af94d297e9dfb004d89167b04e26563721087dfb66c4665f4536def3fa6e21", - "format": 1 - }, - { - "name": "plugins/callback/unixy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4dd32eff531ebed7499a6e94812f1a0d1b93f697361bca14fbea1c75f840b632", - "format": 1 - }, - { - "name": "plugins/callback/yaml.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69838b12ded886a7663173e305dc10143baf8a8dfb51458cc4d323728d5c318c", - "format": 1 - }, - { - "name": "plugins/connection", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/connection/chroot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "322bae37ba470f073f786622847d83e2a3b2a170349e8511a1500907ee7be3ba", - "format": 1 - }, - { - "name": "plugins/connection/funcd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "190eb5364ac8895b86b364860544f3a9a28cf77ad1f406e089667166ac5cf8c4", - "format": 1 - }, - { - "name": "plugins/connection/iocage.py", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "260bfc99a34252f9a38a19b1738814f3dc6aee1aa15434f0e963dcc014f32381", - "format": 1 - }, - { - "name": "plugins/connection/jail.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "239beb53a3ddf81a855f194f2ffe50133b2d2bb5f80c7ca2fb1673220f022d4b", - "format": 1 - }, - { - "name": "plugins/connection/lxc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "852b490a76392ae10e3ec3ddddd7a6efd66cf67718076cf5694c0c314fe6273c", - "format": 1 - }, - { - "name": "plugins/connection/lxd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2758cf8b7b184104def140b85ccffd7240aece938c2211a890934ed5445496a9", - "format": 1 - }, - { - "name": "plugins/connection/qubes.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc1e03e94beb0355bb25daa42ad8ea14549aca47ee296ad7c7d91fc301db42c8", - "format": 1 - }, - { - "name": "plugins/connection/saltstack.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "281ea091d1b347f5d82e31b3a43d75d8a4e83a5557793c74c8e3dfd1b74556bc", - "format": 1 - }, - { - "name": "plugins/connection/zone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f5933c5f126170dfe744347811c69efe60e14022ff12684c6b12481c85c8f4ae", - "format": 1 - }, - { - "name": "plugins/doc_fragments", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/doc_fragments/alicloud.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "678f5b64368f51cc5ff9bbbac69c2b722cba9408176523d0a874eeec0d2d8c46", - "format": 1 - }, - { - "name": "plugins/doc_fragments/auth_basic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "34740866c7bdbcaed75b7d4414e978ed24a2e05424e4a2af4e179fded67ab950", - "format": 1 - }, - { - "name": "plugins/doc_fragments/bitbucket.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0e68bd78cd3a1bc6c4b17428e6ba9e0b3761f4ede3d0a1ca6964124d47c9e476", - "format": 1 - }, - { - "name": "plugins/doc_fragments/dimensiondata.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a34334fca7e874a3c5381729e38804f893a49869e664b5098d2340074526b15d", - "format": 1 - }, - { - "name": "plugins/doc_fragments/dimensiondata_wait.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "854970d12c42e53dd93c01cc6c00f4561c29503a0cb50f8ba7d74d85abb67047", - "format": 1 - }, - { - "name": "plugins/doc_fragments/emc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8e4738088f16fdf47cec032f3046c5dce8a520a06d4e3c47e4f8d7bbd978599", - "format": 1 - }, - { - "name": "plugins/doc_fragments/gitlab.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "48058c3ef0154703e8184c3d51345cf10f3f805aef57c936727253270474cba0", - "format": 1 - }, - { - "name": "plugins/doc_fragments/hpe3par.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ed065527eb18c4d72c5ab95a898bad90882cafcfff03be3f22823779ce8b9a1d", - "format": 1 - }, - { - "name": "plugins/doc_fragments/hwc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb061c5a72b42d57ec94959856ade7a12394b95cabcf7b6c64af2339876611b6", - "format": 1 - }, - { - "name": "plugins/doc_fragments/ibm_storage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aa82aa6805f786bbffc1c2d0740fa230852373ce44a38a7af28e7f880f998e61", - "format": 1 - }, - { - "name": "plugins/doc_fragments/influxdb.py", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "eb088065bb558d8bd0fd38facbd3e569a75cf2350ff54bddee7ec87d25f3391a", - "format": 1 - }, - { - "name": "plugins/doc_fragments/ipa.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f952ad0bc01198e8db6f21143273782cab239d5e6acc035fd4606e0aabbfed2", - "format": 1 - }, - { - "name": "plugins/doc_fragments/keycloak.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a48e78b6913388c7e758243910eedd30ec2319e0d7ed4aae71def7bf865929b8", - "format": 1 - }, - { - "name": "plugins/doc_fragments/ldap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e21d098d79bb479a2fa47e655cb2fba729fdfb233a4bf2a638fe2703ebb479da", - "format": 1 - }, - { - "name": "plugins/doc_fragments/lxca_common.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f3c7661305f8b89b2e4a611bfbdf08a9ca2585d90fe0b156747eb45d1d6a09c", - "format": 1 - }, - { - "name": "plugins/doc_fragments/manageiq.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d04400f964b445c2ba413c5028e62f5e4411c7daac5ee7c520c9da8a4adc1fb", - "format": 1 - }, - { - "name": "plugins/doc_fragments/nomad.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7bfef29c982c903cd3263b446dcd28eed54bb5f5834a3a848425f478634fa9d9", - "format": 1 - }, - { - "name": "plugins/doc_fragments/oneview.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b56ef83547e88ad7eb9f15732dca7f8eac1a4e82b5146ef8f783d3fcc5e94d13", - "format": 1 - }, - { - "name": "plugins/doc_fragments/online.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a23dcec58b0d6fabcde691111466b6ffc6db5d8034de41f4da96eeeef3789a35", - "format": 1 - }, - { - "name": "plugins/doc_fragments/opennebula.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c09c661f3141cee65af3798e6d8e99097cc80a61acd36ad52e41a7b12ba6b5f6", - "format": 1 - }, - { - "name": "plugins/doc_fragments/openswitch.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "803130c2ab3075b5553a20bb57cc40db502f37699f9e0e90d23539a1d04f45f1", - "format": 1 - }, - { - "name": "plugins/doc_fragments/oracle.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "53102f2a2e5a1b856ace02593803940baff8d355a346ee41a66d46fc918ef066", - "format": 1 - }, - { - "name": "plugins/doc_fragments/oracle_creatable_resource.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0d5149fc41043c8566f45e188a4d7ea02641ef62e176cabf079da16d145f6fbf", - "format": 1 - }, - { - "name": "plugins/doc_fragments/oracle_display_name_option.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f0639a4b83c44df4fb5af0e879f0d8ebfeccaf2d385cd5be7128da41ca52bd8f", - "format": 1 - }, - { - "name": "plugins/doc_fragments/oracle_name_option.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "01bc8f3275fa439749fcb06191ca97459f66137434ae38979ef0c62cc56c1be9", - "format": 1 - }, - { - "name": "plugins/doc_fragments/oracle_tags.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "36a057fbe4873583cf0983070d75552a3d2db084e11c6f9b47d1e51585749df9", - "format": 1 - }, - { - "name": "plugins/doc_fragments/oracle_wait_options.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "808e217393514cca1cd235ccbb80dfd09029af85325edca13f63f352c2f11e34", - "format": 1 - }, - { - "name": "plugins/doc_fragments/pritunl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"cf7ab079e36719f73c8a7598062b3e4f7b0d2a2f55e7e2b92e170b6e5ca1541a", - "format": 1 - }, - { - "name": "plugins/doc_fragments/proxmox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5909e654a318bd6a6ec20270a7e12b61a26d67df1447d05b50846acf0df5022f", - "format": 1 - }, - { - "name": "plugins/doc_fragments/purestorage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5ddc57c545438fa417cd600b412758153396222b802ec28968f57b11f6031cb8", - "format": 1 - }, - { - "name": "plugins/doc_fragments/rackspace.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "22456616c25433541723ad90c6fb91b09fa52a2c1bf925c93cb5cb7dcd73f2cb", - "format": 1 - }, - { - "name": "plugins/doc_fragments/redis.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c867d37c83553d1d8b4ab1e2f0ddc9f5f313047df27e0ffe9fc73807a66ef2ec", - "format": 1 - }, - { - "name": "plugins/doc_fragments/rundeck.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1128f3948d9ef91920987b76b7f70fc3b36b416376528c0d5a5e3568384543c1", - "format": 1 - }, - { - "name": "plugins/doc_fragments/scaleway.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "45b9ab99f962f8c87c100b113c98ccac2789142220e55851c1239bab43c657cc", - "format": 1 - }, - { - "name": "plugins/doc_fragments/utm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c595fd6936490911e09c041bf49e93933278c65a838896b95075432a3a7e6acc", - "format": 1 - }, - { - "name": "plugins/doc_fragments/vexata.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8039bbcfe59a10d3db8062a7bf6ec797a0cd96d5d1163635fed5db388670d9a", - "format": 1 - }, - { - "name": "plugins/doc_fragments/xenserver.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "84cffcdae284ac8290d7f2865f536fc13bd35b4cd31d4a5eaeef89493f53b64d", - "format": 1 - }, - { - "name": "plugins/filter", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/filter/counter.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "594012ed8bf9714c030858fa25bab1b950161c0a2ae683fc009eac580bdc79f4", - "format": 1 - }, - { - "name": "plugins/filter/dict.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "37f731a7b4ef003d88eac100709c097a7522c8cfb27688ea95565cb096b226ed", - "format": 1 - }, - { - "name": "plugins/filter/dict_kv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "162301ca8d64b1366113df22068b7b9a150610f5ab13023beb846c176863bf86", - "format": 1 - }, - { - "name": "plugins/filter/from_csv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "121f47a203202d50e035cb54fcf119e710c7557a894745d26c5643733db130e1", - "format": 1 - }, - { - "name": "plugins/filter/groupby.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "530d6dc0f8f500d68393de2f744261fb20e1457f01183af02fbfe461c890acb9", - "format": 1 - }, - { - "name": "plugins/filter/hashids.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4e5740394a9f9d7f8b1333e4b200b60cdf8eea18aa70bae28ae7fcc9df70562f", - "format": 1 - }, - { - "name": "plugins/filter/jc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4ee84bed62711c60d2c0ae37056801b389f1ce1fe588efe0f3f9b566a74144a6", - "format": 1 - }, - { - "name": "plugins/filter/json_query.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"1754fc223cf8315816d846798dad5e9a07daef8e1b6adaa282b15afa3ca48983", - "format": 1 - }, - { - "name": "plugins/filter/list.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7767a0d9040ded2d5c06474aa51c858c1c6bf97786b26f9c30f0614feab5e905", - "format": 1 - }, - { - "name": "plugins/filter/path_join_shim.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "af24e5338d8ad56ed65bbdbccc59ab1b3d688085c42051e935fb1ef1b009dbea", - "format": 1 - }, - { - "name": "plugins/filter/random_mac.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1907c71d6eb92015868e1bc393ca39a5ebe312759a41a629eb48be54b65fee43", - "format": 1 - }, - { - "name": "plugins/filter/time.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "80a25fc2ba18f1ce7a68017a1f5af5435f40eee159c07be188f1fc51b3818d73", - "format": 1 - }, - { - "name": "plugins/filter/unicode_normalize.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "434484e773bc02a9aed066c7166b8f6d30a937b22c7f177cb4a5cee5733b3e08", - "format": 1 - }, - { - "name": "plugins/filter/version_sort.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "818111a99a79f02fa838f8340897b914a84304b1ff24fb77eb33f0a6757e948e", - "format": 1 - }, - { - "name": "plugins/inventory", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/inventory/cobbler.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9a6b423e69705b2bc5a4bae307cba2bd41f3d9ee94c398846cbf1fb33df7a509", - "format": 1 - }, - { - "name": "plugins/inventory/gitlab_runners.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f490cc333192da437f9ca31881b14db49cd3c2b44d5481d126463968e65f700", - "format": 1 - }, - { - "name": "plugins/inventory/icinga2.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd2b8abe72349f95ba1b23d5daac2e148ed50c80a4ccb4aad31658d93b2bd1b5", - "format": 1 - }, - { - "name": "plugins/inventory/linode.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "490656d2d37ed9d1499662d5a2a9269a18922ce40e1611441450fe31cef73d35", - "format": 1 - }, - { - "name": "plugins/inventory/lxd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2b8de32e26a01c46eb80084c968b2f1c808f5bbb76a5569ec1746e9191a380e9", - "format": 1 - }, - { - "name": "plugins/inventory/nmap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "34512b9e6ebaf0d368d03d2bcbf9d184a6b796210a71d256976dc2b4ec5e1c3c", - "format": 1 - }, - { - "name": "plugins/inventory/online.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c4d33407094a14d875e37b46acaae929212d70046ee3e90a9376795a678fd59e", - "format": 1 - }, - { - "name": "plugins/inventory/opennebula.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b464e03664f7e72776d52cbac1abdaf6358b303cc726d1cd4196fe92542d4522", - "format": 1 - }, - { - "name": "plugins/inventory/proxmox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d517b140e791d044c6dc3c34b7be7a1ae6e3a0045f11f85ee0627f3e3b7f46b", - "format": 1 - }, - { - "name": "plugins/inventory/scaleway.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ed112e682df95e0eb4432d7824951e60f0908ba09c3f5612df114ab442baad97", - "format": 1 - }, - { - "name": "plugins/inventory/stackpath_compute.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"086a7a3c09bbd181750bb397399495d30acee51484ab0930ac2b1f1ccf1a44af", - "format": 1 - }, - { - "name": "plugins/inventory/virtualbox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d7dd10c1c99e7486ea054780cea690dd122df85b200b97853ae528bb2cdd85cb", - "format": 1 - }, - { - "name": "plugins/inventory/xen_orchestra.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "64120b597c9c0946705db83a4159f8714bac9da25cd5716f9dfbf55247b6d7a9", - "format": 1 - }, - { - "name": "plugins/lookup", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/lookup/cartesian.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "601cdd5c19a9f962bfb78eff1140e06143d22253cc42c3352dc3535d56727010", - "format": 1 - }, - { - "name": "plugins/lookup/chef_databag.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fc62bc09202aa868006f820cb54ca8081f1839a98b65ff4eea643832cd8d984e", - "format": 1 - }, - { - "name": "plugins/lookup/collection_version.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ecce1b17933bb8618696167c68794e0f285ab0254556857379a3438846b3393d", - "format": 1 - }, - { - "name": "plugins/lookup/consul_kv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8776417592ea1e3a4902cf5062270d4a72fecd556b58d87ae3a1390dd7fec00", - "format": 1 - }, - { - "name": "plugins/lookup/credstash.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "da6715b6fa5fadbe9f7bf0f6f3941bd420d6c95d8c93f1af08d14d68cd509b16", - "format": 1 - }, - { - "name": "plugins/lookup/cyberarkpassword.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49f79f62f69b96251569e967cc3e5444352f8fd93f3425bd9df49dd34a76a488", - "format": 1 - }, - { - "name": "plugins/lookup/dependent.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "05ffac09abc7b434be1ea9d4f24ec8ddb06a1e014f91bb94fa0542abf3d139dd", - "format": 1 - }, - { - "name": "plugins/lookup/dig.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3c283980b0e8038c1be6f6a41091266c1e486ba3096b5ab702674cdb956790a7", - "format": 1 - }, - { - "name": "plugins/lookup/dnstxt.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8d09c809d3862854e8899785110027dc3478ed1fdec294bdcf88218caacc0fcf", - "format": 1 - }, - { - "name": "plugins/lookup/dsv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b358f5ee14615d293267f67a70d5586437a3571f5e62890e2751793f63802967", - "format": 1 - }, - { - "name": "plugins/lookup/etcd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "98fe425dcdbe482ea1dabe239041b098cd5782f253f29d764023e908d2d72b5d", - "format": 1 - }, - { - "name": "plugins/lookup/etcd3.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c6dfc208189b24a892a0039a75003388d37629a4364bb592e5c6a83d1907c5f4", - "format": 1 - }, - { - "name": "plugins/lookup/filetree.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0354e0643410f2eab47a1d92f0500779a9bb1e189ef353494e0585abab136938", - "format": 1 - }, - { - "name": "plugins/lookup/flattened.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "efc7d3e2f66fe14c0b392ae86fc3dd911a88fb27ae95af94b944e74a9b6873e5", - "format": 1 - }, - { - "name": "plugins/lookup/hiera.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ed4305bb68b4c75ba0d85b4a81c2f26406335df9e38ad39c1628bdb5c4985df2", - "format": 1 - }, 
- { - "name": "plugins/lookup/keyring.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44734661d6389b0d36cdc184bd2a31c7acaf0e3dfc962b66c68889b3c4b0c244", - "format": 1 - }, - { - "name": "plugins/lookup/lastpass.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3b03df385693176d05d1c8476ebe517c20c5d940addbaa291979232d21949d8f", - "format": 1 - }, - { - "name": "plugins/lookup/lmdb_kv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f731830e02139284d7e0fe7268aaa0477ed7ef71edbc6f824207ef02adf5f412", - "format": 1 - }, - { - "name": "plugins/lookup/manifold.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "12f925dae69fa1397e6b2991497d926cafbbe26909e53bb32483500108837680", - "format": 1 - }, - { - "name": "plugins/lookup/onepassword.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e2ee9db11ee7c83320d4d782844075a8cb850cf342b3e0d4590c00df975762be", - "format": 1 - }, - { - "name": "plugins/lookup/onepassword_raw.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "745ef11eb06162d4b1d7eaed2ff0167862c690009f590e990745963d5306190c", - "format": 1 - }, - { - "name": "plugins/lookup/passwordstore.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8c18082005704e6154682ab99c1454dd78be164f8005e4cc03a928aedbde48d9", - "format": 1 - }, - { - "name": "plugins/lookup/random_pet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "37bf4ac9d16cbfe47ab0e2dd79366d501ce7292b39a4b99557fd1f24d16d9541", - "format": 1 - }, - { - "name": "plugins/lookup/random_string.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8d1e85d9752a4c1441caa617558296569cddbb2d898cabacf769cb2a8f5ac113", - "format": 1 - }, - { - "name": "plugins/lookup/random_words.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc0f56ec2996fe8016e9de309cae58f1d77206820740bcb4a75cd8603aa0c4f3", - "format": 1 - }, - { - "name": "plugins/lookup/redis.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3c8ec26f3669649cc04347ceb8c9d255df4bc610a7785e4b45770f12ec6d0742", - "format": 1 - }, - { - "name": "plugins/lookup/revbitspss.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63dba93df50106fa2781e584db2ba5d16a87b982a4927ece21d9d8ec1304b2d8", - "format": 1 - }, - { - "name": "plugins/lookup/shelvefile.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "037c25ee46d4e4702e4893eee1afa2fc99366485d278997747e5b5758fc96dee", - "format": 1 - }, - { - "name": "plugins/lookup/tss.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6eb4f8b5253c68a19479b23f19de02c03722065396b996ddc5dc8facaac2ff43", - "format": 1 - }, - { - "name": "plugins/module_utils", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/identity", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/identity/keycloak", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/identity/keycloak/keycloak.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f53ff61bda7067e63b100dc96f1516f98d21cae8dee06530c630ac7a8ac6b5b6", - "format": 1 - }, - { - "name": "plugins/module_utils/mh", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/mh/mixins", 
- "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/mh/mixins/cmd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "93dd46c75ef7fb7c459a2635e262f7fdbbfafccc170ae0b28f2ec9c978eea99a", - "format": 1 - }, - { - "name": "plugins/module_utils/mh/mixins/deprecate_attrs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dff6ebcc0c94de20c94d90143682bf29c619a15a70b185378ab8918ae7c1e658", - "format": 1 - }, - { - "name": "plugins/module_utils/mh/mixins/deps.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "25cd3ff114d835d74e5065bb13cd1ad6401f02674eb007570423245248a53c57", - "format": 1 - }, - { - "name": "plugins/module_utils/mh/mixins/state.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2b7973ac3e0e31ceb5f3864c50baa4f130820714c69c82e24b1e2329f637eaf9", - "format": 1 - }, - { - "name": "plugins/module_utils/mh/mixins/vars.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "73ecfc76056264c5d683ef0b207e65c631a3160b23436340e8e0859bdf2f39c3", - "format": 1 - }, - { - "name": "plugins/module_utils/mh/base.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2829bb2e209d6e264910506727a0579afdf891b04c751d60fcfd5308bd0e0856", - "format": 1 - }, - { - "name": "plugins/module_utils/mh/deco.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "11a2e78b057f9d0c1b6aae0012273737a230b89a1b748689076c19311e06d19f", - "format": 1 - }, - { - "name": "plugins/module_utils/mh/exceptions.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "87593b77ff79dd644b6d815ed269b743b1f425f9e689b8dfe928138b2f957b73", - "format": 1 - }, - { - "name": "plugins/module_utils/mh/module_helper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "15968853409eb2166c53e3ab6f4d3331fe92bf70f1e1a39e6d6e307a6114cdde", - "format": 1 - }, - { - "name": "plugins/module_utils/net_tools", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/net_tools/pritunl", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/net_tools/pritunl/api.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "154a0510cdd87e8e3559582f1f9125186d3f15596d78e9b9b3fa713f223dc338", - "format": 1 - }, - { - "name": "plugins/module_utils/oracle", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/oracle/oci_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "532c951a79e9bcfe9c8066214d668e3bb69800d4081d9b368e736f5641df4783", - "format": 1 - }, - { - "name": "plugins/module_utils/remote_management", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/remote_management/lxca", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/remote_management/lxca/common.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "da683cda97e3b7089a56a4cc74e92a17d15d122fc3539437c68fc640be012e4e", - "format": 1 - }, - { - "name": "plugins/module_utils/source_control", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/source_control/bitbucket.py", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "172c5a258df5efbf9200ab18706aeac4a1707268de0c0725f906f98bc4ddac0e", - "format": 1 - }, - { - "name": "plugins/module_utils/storage", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/storage/emc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/storage/emc/emc_vnx.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "78c9bac2963bbac14c05810618ab082b46049456cc35be571235fb4fd0ff0466", - "format": 1 - }, - { - "name": "plugins/module_utils/storage/hpe3par", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/module_utils/storage/hpe3par/hpe3par.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b978e81816e639956697e866ea1249e3187b1500440a6bfe723d896fa64bbee8", - "format": 1 - }, - { - "name": "plugins/module_utils/_mount.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8919224fab73d91d655912c780866109fe811ee9b34af3d36c72f663fbb3a8d2", - "format": 1 - }, - { - "name": "plugins/module_utils/_version.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "25726217922373642c280d10e4949b6424c22ce3067e22e5a1d343e9943d54c0", - "format": 1 - }, - { - "name": "plugins/module_utils/alicloud_ecs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3c6d509c6f6c58594ff0b0813872a65478c19315af52c86ec334132ffd003159", - "format": 1 - }, - { - "name": "plugins/module_utils/cloud.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1691ca71996ba84bcc8abd35da7647705035f1e390672fe7962dc502617d8a4f", - "format": 1 - }, - { - "name": "plugins/module_utils/csv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4cc33c9b0881219a255af4669ed2ead6b8675769e3fc9a94ce3a17d6706202c1", - "format": 1 - }, - { - "name": "plugins/module_utils/database.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ecea17a2a45cb079354e6bcc8bf043fcd3373f103f0a9c4aaeec22bdbbf5b0a", - "format": 1 - }, - { - "name": "plugins/module_utils/dimensiondata.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aa6a1917eb2c3cf04159130fac79f317e84711d539a7f2ab9eeeccc7df7de2ba", - "format": 1 - }, - { - "name": "plugins/module_utils/gandi_livedns_api.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cbf2e1b3dc0fa2080a2b36a2bb72979af470f60c5c012a6ae9f35b0fe22a8d40", - "format": 1 - }, - { - "name": "plugins/module_utils/gitlab.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5e0270a52a05723162d30353adaf7d1f89068c3b3a69dbf840317b48377d5279", - "format": 1 - }, - { - "name": "plugins/module_utils/heroku.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77b1c51c9c98a9093a964de5991e379a140002513c1b42df54a127a2e81d6a99", - "format": 1 - }, - { - "name": "plugins/module_utils/hwc_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e644ea3ca36f30764fcaa369072fb87e3c7d9b0188b8570b0902fbdbd8bcc010", - "format": 1 - }, - { - "name": "plugins/module_utils/ibm_sa_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "606f1d1dd9ff8a739c706157de668ea6df79273aa6545509645eb4791032cc70", - "format": 1 - }, - { - "name": "plugins/module_utils/ilo_redfish_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"9c8f188c3f2e6143eaf92794baec80f9c63823daf4baee92d6336b15396b6be7", - "format": 1 - }, - { - "name": "plugins/module_utils/influxdb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fb2d2b67ce38172c9265adbebc86590a2f1bf36a348f1d17bc179064de838044", - "format": 1 - }, - { - "name": "plugins/module_utils/ipa.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2db62c7f034d2d5b085cdf375bfc010b7135a33556e207941bd14be26edb6352", - "format": 1 - }, - { - "name": "plugins/module_utils/known_hosts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d6968081e28f64f7c22c48804c0990762ee18d77f31c46dbe1219391670e6485", - "format": 1 - }, - { - "name": "plugins/module_utils/ldap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4b8a7bdb65f124dbe79c02b8bb4592a72f42ca059c7f43a451730b79a82b3120", - "format": 1 - }, - { - "name": "plugins/module_utils/linode.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "835ba5286602cb6678f067f4523f1b884e6ba588e52de900b08bacd8bfd41884", - "format": 1 - }, - { - "name": "plugins/module_utils/lxd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a84494f0f3f7053e3c0ec2edda3dae59bb1b543d0d459291c4b9701e12c5d672", - "format": 1 - }, - { - "name": "plugins/module_utils/manageiq.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e2f385d2563979f768448c3c902d3aaad71c6f5e85f8dcc6aad36c5ce361c6fe", - "format": 1 - }, - { - "name": "plugins/module_utils/memset.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b699e7d7a3189e0d0085a39fdc164c9bd31862be047d3221014eeb8eddb2d07d", - "format": 1 - }, - { - "name": "plugins/module_utils/module_helper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6bf2d8179c5353f2df6628282d37075b7ef71912ea56ec7ff56c296c3c481281", - "format": 1 - }, - { - "name": "plugins/module_utils/oneandone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a41305aac71c57950fe1139e541f4913009e90fcfe097062bf6370f95ae54d0", - "format": 1 - }, - { - "name": "plugins/module_utils/oneview.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f15b7e3b0511dfdc47abb0844e355af8e0e6d274c4151ff6285460af9baf3ac", - "format": 1 - }, - { - "name": "plugins/module_utils/online.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b33524e4c4ec9a89b937e3f98834af94fbaa42d710d759b8c127240d2c034e68", - "format": 1 - }, - { - "name": "plugins/module_utils/opennebula.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1ddadc44b0351b1e71914fc01170ca17558e156e06ee08131e881e797d939dd1", - "format": 1 - }, - { - "name": "plugins/module_utils/proxmox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a758afd9dce948cc94205d44537689ac5304a1ad5b6c0a720edfebe2c2b7ac5b", - "format": 1 - }, - { - "name": "plugins/module_utils/pure.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "60df81c9c2b060371eec008cff84670b87c339a4d414f3ae55f55ace604f5d76", - "format": 1 - }, - { - "name": "plugins/module_utils/rax.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "20cd4c7581094c22183254263ede05d70e44d915ea54e98c39be0f7bad2f7f9b", - "format": 1 - }, - { - "name": "plugins/module_utils/redfish_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "292bbafb6fac2bf23c2cc28426701c2d0d55ec6065914ff2655e3b1564805cc9", - "format": 1 - }, - { - "name": "plugins/module_utils/redhat.py", 
- "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "19c5d942349922f542caaa782e540ca036a9119dab63577ffc6f88f2e0f9151b", - "format": 1 - }, - { - "name": "plugins/module_utils/redis.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9157e11b9949e354cadd630919844e4e8bb89f542b239598dc8385fe2566e58a", - "format": 1 - }, - { - "name": "plugins/module_utils/rundeck.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "753a1aa0bea535ff317f43a74d9f636871a505db8533ade49b3505bc49290609", - "format": 1 - }, - { - "name": "plugins/module_utils/saslprep.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bc03619992801129d5aacabd6caca116a11077a3270d1a8934213a0f870725af", - "format": 1 - }, - { - "name": "plugins/module_utils/scaleway.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb222ed5ba99758851c55db8678d4516bce88c2635daac6455b228c0428fe634", - "format": 1 - }, - { - "name": "plugins/module_utils/univention_umc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b2485af98b53118b37e49002e8da0a69fa91d397b440e108304edf6729b80080", - "format": 1 - }, - { - "name": "plugins/module_utils/utm_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "22494870f3b67a64b503bfc8fd7d71fa1a12b903d7e2ccbcb242b2ab4c72a06c", - "format": 1 - }, - { - "name": "plugins/module_utils/version.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f8a9655670ac2b2c3e2aae452324c5f283721bfe5bc971957ac5836bebad61b", - "format": 1 - }, - { - "name": "plugins/module_utils/vexata.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c303edbc99d8703b5a185586b36b6dda7da695f60b8939be16ed9a314471ccd5", - "format": 1 - }, - { - "name": "plugins/module_utils/xenserver.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "36332210967cd3f4aa4a29b79653319a9f513a3583779dce8af9e25f9f5cac8e", - "format": 1 - }, - { - "name": "plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/aerospike_migrations.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52c1641f7f943c727a0d6b8eab2b292b010d9347f28396adc4e8c75159dbb08f", - "format": 1 - }, - { - "name": "plugins/modules/airbrake_deployment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6aa925fba8833cbaa4a23775684646db31a7f1410c4688392ced89db20bbcade", - "format": 1 - }, - { - "name": "plugins/modules/aix_devices.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "977386dee01ac51d9c885ecee657e0a24df1b5de87996f0a9c9f8c3d0605c08a", - "format": 1 - }, - { - "name": "plugins/modules/aix_filesystem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "292ff33ccfbcaaf28dc4cd67f6b749dc6b06ae1aa72db436245d348946c19bf7", - "format": 1 - }, - { - "name": "plugins/modules/aix_inittab.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e4b6091b24210a657d58c1767107946ecdf34f90cef0460762144b8cf6d4cd2", - "format": 1 - }, - { - "name": "plugins/modules/aix_lvg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "633b5243b9ea9b21d80f381a9698f140586e3a39310d21fb83ef8b5aa0d350cb", - "format": 1 - }, - { - "name": "plugins/modules/aix_lvol.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "feb995da59928c227261390532e549999f7a27594f09744529878c91b72e7bea", - "format": 1 - }, - { - "name": "plugins/modules/ali_instance.py", - 
"ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6273f052fa89f9ab9a27230eee5064a37333af680e24ba1d5a715ec11e83c980", - "format": 1 - }, - { - "name": "plugins/modules/ali_instance_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "34c5d0b44fc32a43160e9c62290e1afecfe73481f22b9a9ce8b444c4517112de", - "format": 1 - }, - { - "name": "plugins/modules/alternatives.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "507ab83ed8cc3718318b5de58d67eb743ad0318eab406441eaefd01a5eb18dd1", - "format": 1 - }, - { - "name": "plugins/modules/ansible_galaxy_install.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f7662c68a2cd0beb854eb1cb47411a4b5bf7004acfa0cd101898aba88c0afd6a", - "format": 1 - }, - { - "name": "plugins/modules/apache2_mod_proxy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d5fe445448cb9e4605eb0fe5c84e599ae353ecb8a256729b0510392d4fbbc4e", - "format": 1 - }, - { - "name": "plugins/modules/apache2_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4dbb4a1e3308a693aaa3101faa828015f66a6a65e040cf3a9a2eee417800d6b0", - "format": 1 - }, - { - "name": "plugins/modules/apk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "066665788179692795453db9675607e9c400f214f80382fa1646c0a5c4e0b709", - "format": 1 - }, - { - "name": "plugins/modules/apt_repo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a92bdffb40fa2bc8fc8e6954573fccec4a94a8a23884dcee4f680ddec78880e2", - "format": 1 - }, - { - "name": "plugins/modules/apt_rpm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e8b8b0d6893fe18ae148144e7ce1e816a07cd760ef60511dcb230c0559b4e433", - "format": 1 - }, - { - "name": "plugins/modules/archive.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3a0715d0aae4143b1f42dc73f560afbfa85782c37ef1645840e27400da7534d3", - "format": 1 - }, - { - "name": "plugins/modules/atomic_container.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13baf1b70fda761f06be5d8de58290518bc8707287af37fe1af641284fb504a5", - "format": 1 - }, - { - "name": "plugins/modules/atomic_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef3911802c6f970e9014cb8fd849be9df1f8e897876fc9cce03cd66e7d3a2e5f", - "format": 1 - }, - { - "name": "plugins/modules/atomic_image.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dd25dd2258096e58d9d2873a382e9e5f530cd6224d74325c5466a829f9f6c5e2", - "format": 1 - }, - { - "name": "plugins/modules/awall.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63f6d1714ac308da87c08e54b17fc2205f0bf2426d26914061074317ae835b8c", - "format": 1 - }, - { - "name": "plugins/modules/beadm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "07a418d4d0b40c72721627f7c49bc9f2e6c780247e9f101bfa57c79bf18bbf6f", - "format": 1 - }, - { - "name": "plugins/modules/bearychat.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8f224a3485783e66fbde1636e5131e561fd1a9006ffe2ec5d24188c07736f5c8", - "format": 1 - }, - { - "name": "plugins/modules/bigpanda.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcc88a1f79d5f53d3fe5e69d911a01177f063a9aa52428c22b4564d306f35ec4", - "format": 1 - }, - { - "name": "plugins/modules/bitbucket_access_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "36c0e727d4cf7e57a1ccb7f712ca472f3ed20a8c0b5afa656c9461d39b948ce1", - "format": 1 - 
}, - { - "name": "plugins/modules/bitbucket_pipeline_key_pair.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c4b8d0fe0f4ada9e881cc1e76e9365bbac7d35f0650235b9033037482d1e5670", - "format": 1 - }, - { - "name": "plugins/modules/bitbucket_pipeline_known_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dd5b27ae648269aab81d3ac46036fc6288781c2a77c02db480ea66ba1bc1445c", - "format": 1 - }, - { - "name": "plugins/modules/bitbucket_pipeline_variable.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3409614c64334e483f093a3f094fab692d09aaac0db65da0225337e4db2993a0", - "format": 1 - }, - { - "name": "plugins/modules/bower.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1469648267092280b084c97ff84b89cd29656ae25f5c12b23d6a34d6bd21f214", - "format": 1 - }, - { - "name": "plugins/modules/bundler.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b8afe9744c027374c7bb7fce88ed55069f27cbf040447a5f0f04a04b9053012b", - "format": 1 - }, - { - "name": "plugins/modules/bzr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "127a4d24fb7ecd0ae8286c7f1eb5332ca2e3217e7ac29ed85c1e814eb7cfeebb", - "format": 1 - }, - { - "name": "plugins/modules/campfire.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d22a3da654653ddb964eb55db9164c254860f4430dbe8b505b6945f220294bea", - "format": 1 - }, - { - "name": "plugins/modules/capabilities.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7d9e46ddf9acbb7caa0bf526654e9b199abf60e253a551d9f10c4e4673fd6713", - "format": 1 - }, - { - "name": "plugins/modules/cargo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bba289036c8d3d677f768224f9eed512badd2d001089ab783be6f5a8f5e868a5", - "format": 1 - }, - { - "name": "plugins/modules/catapult.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f1bc195bce4b7de9e4e5c612fba7c422e104af61e77d79860c7dfa69b8b0f15e", - "format": 1 - }, - { - "name": "plugins/modules/circonus_annotation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57172616325c7ece221ed3f154e59473f1bfe52c802dcaf0fe0f870133f185b8", - "format": 1 - }, - { - "name": "plugins/modules/cisco_spark.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b91160d8b53c538dbdeeb45a5584658fcd1a4c57f43ba8a3237a91860a99b02", - "format": 1 - }, - { - "name": "plugins/modules/cisco_webex.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b91160d8b53c538dbdeeb45a5584658fcd1a4c57f43ba8a3237a91860a99b02", - "format": 1 - }, - { - "name": "plugins/modules/clc_aa_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "767f1e863c93bfe0e8d3bb37d7a029384caec1cf41eebde2c6ce60a864feb5c3", - "format": 1 - }, - { - "name": "plugins/modules/clc_alert_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "45e07b52737a3326a3debf36f5d38fc1fa33503b8fd7156f5f1fb19035a8f379", - "format": 1 - }, - { - "name": "plugins/modules/clc_blueprint_package.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52d3398cae86c645575a688a7f9dccccbd60b51d69743fdf2e64be70535c75e8", - "format": 1 - }, - { - "name": "plugins/modules/clc_firewall_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef30311f37991878811921a4ece22412e4c94e92527e9d93d2f761efbfca658a", - "format": 1 - }, - { - "name": "plugins/modules/clc_group.py", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "636a3b3a90bb1d9fd744e2a22f3ad42a6a372df6ffd9f2aef92e606391ecaee7", - "format": 1 - }, - { - "name": "plugins/modules/clc_loadbalancer.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "87e5dace3e225dbd78b375a034bf5b582a4af0ba05b9276b1bf92caa61a8f5d5", - "format": 1 - }, - { - "name": "plugins/modules/clc_modify_server.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "250d51c8692ee01ef2b75c9da4327adeaf79934aae75a942c45807a66ea9de62", - "format": 1 - }, - { - "name": "plugins/modules/clc_publicip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b183d61dc5fb36caf1424935c1915fe087322d608bcfc0211a84b56053e0555e", - "format": 1 - }, - { - "name": "plugins/modules/clc_server.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6c7b6c85a2f14f4caab7d170ea0204f87428a5116e21eb8dffd4bcee26540111", - "format": 1 - }, - { - "name": "plugins/modules/clc_server_snapshot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8dd73687f3268d52da21504f88fc735fbf4a0761655db9693486a46b24263a16", - "format": 1 - }, - { - "name": "plugins/modules/cloud_init_data_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a45eaa4abec3de3c7d4f0bc9338ed79308b522c2cca5496671da197901688986", - "format": 1 - }, - { - "name": "plugins/modules/cloudflare_dns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "92ca2752e2212e77e6cc3a089a6a72f2a20983ebed40c8edf0e1ceaf18ace10a", - "format": 1 - }, - { - "name": "plugins/modules/cobbler_sync.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0a69b0d481ff28ea1a5d848fa8b80f9a07a4ccf3a50b3fd384b588d0184a31d1", - "format": 1 - }, - { - "name": "plugins/modules/cobbler_system.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b4d8ac045e7b8cfadaea593081d4e6bd815492162d6a0a105041563e593827f2", - "format": 1 - }, - { - "name": "plugins/modules/composer.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7f2740d5b0c235ca97fd503e4441274bc748d4c5b0dcbe3e227831599f573734", - "format": 1 - }, - { - "name": "plugins/modules/consul.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4118f4c040b4c3255e9b585aef388871098bb6da386ef3dfb6eff2a62621b7d7", - "format": 1 - }, - { - "name": "plugins/modules/consul_acl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b6f145e052de83a3d5fcdb12fcc783b7c14b42be19bee84b021e28bdd5e4d2b6", - "format": 1 - }, - { - "name": "plugins/modules/consul_kv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "470aac4466c9a747514dcc73b3c50cbab8649050de192563f35d0054820d60ae", - "format": 1 - }, - { - "name": "plugins/modules/consul_session.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc7f0c964b98a2bd770173babef63981ba77fdba3581f31d844caa7aaf2fe723", - "format": 1 - }, - { - "name": "plugins/modules/copr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ee22d4a8ae70df45b23c47432192ba596568b8ff2ddb225c7c7908b08f316c5d", - "format": 1 - }, - { - "name": "plugins/modules/cpanm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "869b73609aa1f1ba8f2d33ccfed04eec450bcdcf31b710526f2d043aa97c0ea4", - "format": 1 - }, - { - "name": "plugins/modules/cronvar.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "14583a0612a939471168bd5d59e7edac48bb01d024aa0d0fc7cdeffd0e923178", - "format": 1 - }, - { - "name": 
"plugins/modules/crypttab.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d020cd305a432f0da349b1243d96fba57a3290b456016dbf7480cf6ca3dd9e92", - "format": 1 - }, - { - "name": "plugins/modules/datadog_downtime.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c4671fae964f84c50e802b97fc64b2fa39173f787741887a6772d6a300184b69", - "format": 1 - }, - { - "name": "plugins/modules/datadog_event.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "203ee66689572ae405f692c6a34b24d12da75ef835feaf512ee25f179e204077", - "format": 1 - }, - { - "name": "plugins/modules/datadog_monitor.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6c1c03834a375f842171002ac31ef4204c4830eb41283263b954704e23353d66", - "format": 1 - }, - { - "name": "plugins/modules/dconf.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ca342ed1e3cae2da6bc5ee31e05db30f23344f75e4c68a06f577d24ddde2347a", - "format": 1 - }, - { - "name": "plugins/modules/deploy_helper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d29a73dd509521790e2dcfde24498ea2967bbb5a4c659d26c8a91f41c1cc231c", - "format": 1 - }, - { - "name": "plugins/modules/dimensiondata_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4adadccb01c1cef01fe7d330d031c733cf61079bf28f82cab9f260d02355eb8a", - "format": 1 - }, - { - "name": "plugins/modules/dimensiondata_vlan.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b30817b9ad59ecb496117d3f53cae29c288dc7307f0ea100b7a01f73dfeb998e", - "format": 1 - }, - { - "name": "plugins/modules/discord.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4526e01b8b1989fa6bd10ad53702eb0115d7e9d213caa2ddca59d86b521af84d", - "format": 1 - }, - { - "name": "plugins/modules/django_manage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "be65d011c47d6222a81d1b82af3f9e2cd5853f174c60494cfcc1930009e315ba", - "format": 1 - }, - { - "name": "plugins/modules/dnf_versionlock.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bb392c313d8a04369b834a4320c70110311fc1feaef6d58852659dacc682d6d2", - "format": 1 - }, - { - "name": "plugins/modules/dnsimple.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0dbb97d863fd4a2fff967c39ea1ea12c18f525db25090b6de23239a7ee1e859e", - "format": 1 - }, - { - "name": "plugins/modules/dnsimple_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd640688d78766e01ab5ff644b82807ee3af3114a8195a482a7f8a6773a32d64", - "format": 1 - }, - { - "name": "plugins/modules/dnsmadeeasy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a4e6ee3395aa9b100b5f9e0e66bb721bcf9688822833ca3f821d977027961c66", - "format": 1 - }, - { - "name": "plugins/modules/dpkg_divert.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "83eb8748719f999e73a1e00bddc2ad0c4fcff0da7d1771feba9e7d1402f260dc", - "format": 1 - }, - { - "name": "plugins/modules/easy_install.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a11e3e151595b9b729431aa2a4be23edd5d228870b3876cf95160d4552e2ee14", - "format": 1 - }, - { - "name": "plugins/modules/ejabberd_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "92c3d42c1eb1126af9f9bb8c118c0a08f28f599c057a03a254b03e76b370614a", - "format": 1 - }, - { - "name": "plugins/modules/elasticsearch_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"09a6283b244e18cdd17f34bcbf8dcfea1c85c7aeba635e033e4b1d7475f4d484", - "format": 1 - }, - { - "name": "plugins/modules/emc_vnx_sg_member.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bdf6c7c0da78522f40ac8678ad94e2088374f137927b412b36c5b538fd257453", - "format": 1 - }, - { - "name": "plugins/modules/etcd3.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eca366113dd69573ccb5c95250ceedfbbec34523cc23ddb2406e3ee9bab01e75", - "format": 1 - }, - { - "name": "plugins/modules/facter.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e9dc303791af31b7355e612dcde7b32ecaa6083514c401a900c1bd6c5da5c616", - "format": 1 - }, - { - "name": "plugins/modules/filesize.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "181ff76460418648e0b4dd3906d3d7699eb7ebe08eb2b532aa57a295ac06237d", - "format": 1 - }, - { - "name": "plugins/modules/filesystem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00db45139f32500f03fdb8b276664e856ee2bbd3e48e225d0bc5d3ab0adaedc1", - "format": 1 - }, - { - "name": "plugins/modules/flatpak.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77856cfeb650ab5930a8af1eacf9b87d3c654c0041c713daf6b3f6fe85c4a9ea", - "format": 1 - }, - { - "name": "plugins/modules/flatpak_remote.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0694a7aeb1878ffe91f91625b645d9fb6391dae6e57bff17dd106c83c6e9505a", - "format": 1 - }, - { - "name": "plugins/modules/flowdock.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c50deeb4589cfd2ae9055e2ca708acceaf41f8c4e705a2f3c84bc4d5093bda9e", - "format": 1 - }, - { - "name": "plugins/modules/gandi_livedns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "93cbd36bb0cb57ab866445984eec096389e81449ede51e141b22284eada70326", - "format": 1 - }, - { - "name": "plugins/modules/gconftool2.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6e5a59c14afe686e07a8595a7f102e632ee78d2dc90749bd147e87b8906ef113", - "format": 1 - }, - { - "name": "plugins/modules/gem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2658234014600b059931be2658b92731a7b317a49ad8b87b7a90f4021d2b92af", - "format": 1 - }, - { - "name": "plugins/modules/git_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4df0f064e3f827b7af32547777bec982cf08b275708cd41bf44533b57cfefcb6", - "format": 1 - }, - { - "name": "plugins/modules/github_deploy_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3d942e6c9a4fc0c0b2ab2b6cfcbb2067b044956b0cc8e3a4eb8908fceeca4308", - "format": 1 - }, - { - "name": "plugins/modules/github_issue.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c71ba6cb604c76b2200e68acff20cf55e167b5fbc111aa68a6efd0b6b0573977", - "format": 1 - }, - { - "name": "plugins/modules/github_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fe0c5fe85830fe7c1bfdcf99cdbc14af5366e29b04eeed1cf551092734279801", - "format": 1 - }, - { - "name": "plugins/modules/github_release.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a0feb5df29b4556ddae70b101a78da6127312803680504c61739b57b4008037c", - "format": 1 - }, - { - "name": "plugins/modules/github_repo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46c5064a6ffa00ff6971115414370a5e49a5dbcef106f18c16a89428e6691fe0", - "format": 1 - }, - { - "name": "plugins/modules/github_webhook.py", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "781a9ccef57e174ddfba6f794b147aa941b53959652a3fbfb9c38b37d4dec4a1", - "format": 1 - }, - { - "name": "plugins/modules/github_webhook_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f2d091ba64877de90900c03df4412db8b71393e0d5a742202feda625c05398a", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_branch.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "922b6c30c67ddb2acf0d28aaa9ab16dce5b1f6ad270223ec6773ef680e35c746", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_deploy_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "43f0d1631cc651c15a935e280f31677805aae6efb6d80b95d21511b8fe4f79ea", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f566f0df7ea3a6d02b4fe0e8550d06400ac926d3d6a24975582c680d3a52528", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_group_members.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10e9d62d1291f8ca28d2dd9d40d67a10028713c53530f516490edfb2187d3644", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_group_variable.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1394fda09fbc289cf2716876d6a5463889abeb5d2ceea2915235dfbf29aa4684", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_hook.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bdce5a96cd31d9444b1841eb9ee396683c70ee3eb50634d2f02c38ce07b374f6", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_project.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba5e593304a1bb3dce94dab2cc62470a892eb3a039b1e6f99a95869d59c093b", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_project_members.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1a3075b6dd2783cf000979cdff99bf7b4f785802ed9e6e08002f629cc1a8efa9", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_project_variable.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "48faf16faee67ab8516ea6b0b7052cc272208325f8c8602c2f013b4384d2eef9", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_protected_branch.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "95ed01ee57390473707b05542cd73dfbc4ff729c5be435222d74ec4b16502435", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_runner.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63967e029ff266796082e00ef8263369f5a684b01213308f62d35be1d8c65926", - "format": 1 - }, - { - "name": "plugins/modules/gitlab_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff0e35d6b34eb457ba640265b41f35bb6fcf335328eb3155f6e3318f12067dd3", - "format": 1 - }, - { - "name": "plugins/modules/grove.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b743647c9e91e766f9d75ca332fce7f1ee2d53f1a60c25e30aa1da8c54fc42fd", - "format": 1 - }, - { - "name": "plugins/modules/gunicorn.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4c0fc574bc49deaa348708e90945d2b44c5ec61d22f3919022bdc67c105666cd", - "format": 1 - }, - { - "name": "plugins/modules/hana_query.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f0503130e11a7444e652e67b08fce9b7ae64fe7e14b201857822558538274387", - "format": 1 - }, - { - "name": "plugins/modules/haproxy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"e406159197e286963c9b16223af8602f7347cb22dc6f02345512b8ab2e1ddc38", - "format": 1 - }, - { - "name": "plugins/modules/heroku_collaborator.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a540ae7b336b9ceb5b55d841ae1c8aa86b43da70501a51a7eafd576c59a888fe", - "format": 1 - }, - { - "name": "plugins/modules/hg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "142f44f349abfc23bfda7f9f2df47d160f2a97446d7d5d31749fd5eab7adab37", - "format": 1 - }, - { - "name": "plugins/modules/hipchat.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46ca51483cbd2b779fba4a7a938d4b2e4088eab98423a196588dbf5c83287e90", - "format": 1 - }, - { - "name": "plugins/modules/homebrew.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "066bf7706d89a85f64b0cf890adc84f4ec37b23291b883c12c73e5b2b80a5c03", - "format": 1 - }, - { - "name": "plugins/modules/homebrew_cask.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2512568adbfbca7a18574b57f68cdf599ea10b5deabab628182ad98c4a71836f", - "format": 1 - }, - { - "name": "plugins/modules/homebrew_tap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f1d8e1a616a2527b3677f208677e9a1261330777aba1acffa03f093d84f2dc84", - "format": 1 - }, - { - "name": "plugins/modules/homectl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b770717fcdd6ce98d6b74d1d050fe20ab9278e7a4d2862882afef34ed3938feb", - "format": 1 - }, - { - "name": "plugins/modules/honeybadger_deployment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "291189d8cb646f5837e39daceeebfd8e54b4f806430deea58c4d54eef50ab709", - "format": 1 - }, - { - "name": "plugins/modules/hpilo_boot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6d0d47b799f9e444207ed5b4667356cee1de57f1d2aeff137aba990ef08beedd", - "format": 1 - }, - { - "name": "plugins/modules/hpilo_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "293b316839408346f2c2c0123d90b40c8f609e82a12246c202bc3843fc811d80", - "format": 1 - }, - { - "name": "plugins/modules/hponcfg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc4939e4db789e57dd8b72fa79789b5f5004b98b3a3e4e5ad2a1ab370d6ce274", - "format": 1 - }, - { - "name": "plugins/modules/htpasswd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3a9e50c4e8fff4250f074d11041a587ae773629bc33fd8082a1c28c68c99c1b0", - "format": 1 - }, - { - "name": "plugins/modules/hwc_ecs_instance.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89845b03caeb5d8bc17443300b889399ae73b4da9df2d1404c1d9c09f042ae8e", - "format": 1 - }, - { - "name": "plugins/modules/hwc_evs_disk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a74a926cd9e503aaebaa3a77d5e80dbba7e42c4c4a92f9c7dbcd147dda363714", - "format": 1 - }, - { - "name": "plugins/modules/hwc_network_vpc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad8ab2a633dea8a8afe36d610bd108ec2d8455632452935ae7d32b49b9f9cb4d", - "format": 1 - }, - { - "name": "plugins/modules/hwc_smn_topic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "43f61a1ef273853a04a5a24138bd7f4d716d3892ba456b9d38a352d682fc26d8", - "format": 1 - }, - { - "name": "plugins/modules/hwc_vpc_eip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4006ecd981645492fe82a37ea0910a40aac3e24e0e1503a046afa52e42e614a1", - "format": 1 - }, - { - "name": "plugins/modules/hwc_vpc_peering_connect.py", - 
"ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d0eca5c552649fd19228928b85cf91670abd2122fd7a6afae49c91f7d84bae03", - "format": 1 - }, - { - "name": "plugins/modules/hwc_vpc_port.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0981c5ad00e6719986102308ac2745eb5d316fd7e0785ebc236102ad9c987ec7", - "format": 1 - }, - { - "name": "plugins/modules/hwc_vpc_private_ip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "778aea0f9e96d24c7c51afdf7eb50bdcda5690d2ca1f10511ead89a47c30a116", - "format": 1 - }, - { - "name": "plugins/modules/hwc_vpc_route.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4369f9a4cfa48a82a66435bf9ebbfcd9a19dd8c91aaf1c5f6684fd33b5c5103e", - "format": 1 - }, - { - "name": "plugins/modules/hwc_vpc_security_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49f9184ecdc9dcc89addc51cd8490746fb3a54089d403f4fb1c64a6f7516f264", - "format": 1 - }, - { - "name": "plugins/modules/hwc_vpc_security_group_rule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd75294789234ffe193cfa2ff95084fb3edb0de2a42d9a20309db99bab189997", - "format": 1 - }, - { - "name": "plugins/modules/hwc_vpc_subnet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3e5ac97a4be19828a95658766474adba0d1b9c4f2bb2dff454cd4bb3aa821480", - "format": 1 - }, - { - "name": "plugins/modules/ibm_sa_domain.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "846c2e2161c51130505d8caeef87178eb8cd40b5fe42d9f9c6649b444f0d7c7c", - "format": 1 - }, - { - "name": "plugins/modules/ibm_sa_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "42574cb0750d740dcbf3dc300cca235b15a22ecb00f79af5aa7818a494b60366", - "format": 1 - }, - { - "name": "plugins/modules/ibm_sa_host_ports.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc5ac76115dfd50d5b8b37aa9de8c75824e6354a4aa925a171a364dd0fe60fbb", - "format": 1 - }, - { - "name": "plugins/modules/ibm_sa_pool.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a1d51e21c6dc90ebea2e67c86200aa7c28b8451bd09c35cabdd5d53123cc1b35", - "format": 1 - }, - { - "name": "plugins/modules/ibm_sa_vol.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44582854ca8e702de67f555704e9d3b007ece65d723bb24536a567e9e7031757", - "format": 1 - }, - { - "name": "plugins/modules/ibm_sa_vol_map.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7a90662d294fcc853121b02134446a6ae10c430a5caf3ebc0766de0cbba6479a", - "format": 1 - }, - { - "name": "plugins/modules/icinga2_feature.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "770edfacd0187f36c9bc94fc88df9fbe51dc29ae1dab5065dbcbd0b0043a089d", - "format": 1 - }, - { - "name": "plugins/modules/icinga2_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46b696ade815c4a19e928de8ca0ecdcfe20754bf55cd1f5ace8554daaded778c", - "format": 1 - }, - { - "name": "plugins/modules/idrac_redfish_command.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "879b3d5825eb59bc67aea7014006f58df64853f8bff388fbb2b7d0bcb67b71a7", - "format": 1 - }, - { - "name": "plugins/modules/idrac_redfish_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "24cbee078205ddcf90266adaec93635a38384d7f3ea4db3a8e0adef7e69b05c9", - "format": 1 - }, - { - "name": "plugins/modules/idrac_redfish_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"820bb9a147f15fe41bffc5567f699b0f000db2869f2ea268f8e630250d95bd42", - "format": 1 - }, - { - "name": "plugins/modules/ilo_redfish_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8546cfb15f05947f7c6760cb5d67928253269aa18102155f600995d3598b739", - "format": 1 - }, - { - "name": "plugins/modules/ilo_redfish_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d175b3b05e25ed30302b1ce7994099a19b07709201c864ff37f210aa7df96ac", - "format": 1 - }, - { - "name": "plugins/modules/imc_rest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e51c5d1375a1a9f469cfc28140144116cb29c3bfa35c459708f6ac76895340d0", - "format": 1 - }, - { - "name": "plugins/modules/imgadm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e7bfa8f3eb4edeb4f1f9e51a4a2c5f17a4390513ff3f2375dc78ab27e5352208", - "format": 1 - }, - { - "name": "plugins/modules/infinity.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "639c7ed7633b97041cd61f657ec7d60d28db516cab49fac6c0cfec5a01c013de", - "format": 1 - }, - { - "name": "plugins/modules/influxdb_database.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f7f03aa049ab52e4dbfb809c86a65d026f518047de475693616d52a611090cc", - "format": 1 - }, - { - "name": "plugins/modules/influxdb_query.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3a8b781c48ea54c78d2a8ac358ccb5f901746e79b0d0da842b5d06068ce6b1c8", - "format": 1 - }, - { - "name": "plugins/modules/influxdb_retention_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00fba18126835c5c2e9e79ad1a3e0fea04613c9718839ce304bd5fe48a0450de", - "format": 1 - }, - { - "name": "plugins/modules/influxdb_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6be29679e39cd622bb5eeaec56a6d802992a2e76a66a1058d478fa72ecef3db2", - "format": 1 - }, - { - "name": "plugins/modules/influxdb_write.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f5e2d773ee043f148680048a538b3a61d529ea7628b431149ca7f8c51057dbf6", - "format": 1 - }, - { - "name": "plugins/modules/ini_file.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ca49a57202bf72b8b079bbbcf5cfd3e33e530e549bd1ca1626f328a11b8b2839", - "format": 1 - }, - { - "name": "plugins/modules/installp.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1360ed768c621c482767cb1994d96e93827b55a20da4d3f2cbcfbdb5278f9c18", - "format": 1 - }, - { - "name": "plugins/modules/interfaces_file.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "25e134950671398223e77965d70780612354f1f321ef3b196377b8fe734adb03", - "format": 1 - }, - { - "name": "plugins/modules/ip_netns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7703c45b7a46aea0d992130cafc0922dc74d926266b8f908adc15c6eef1cfa29", - "format": 1 - }, - { - "name": "plugins/modules/ipa_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8338f390c1e9ac774c095ada6731502c1280e30b01bef293a6651ad54d0bfe8b", - "format": 1 - }, - { - "name": "plugins/modules/ipa_dnsrecord.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "88fd68dcfd0725e575ce7fac94cb8eb9c74024e83bb0eb5dddec34d568725ebd", - "format": 1 - }, - { - "name": "plugins/modules/ipa_dnszone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9944ce41cae935b07410a1a482d2d4cd1c6f07f7060a360e6888e67992075a36", - "format": 1 - }, - { - "name": 
"plugins/modules/ipa_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "70c065752e9e80713862f8fb3fb85f60219ac80d97a49139288bf6dd335ad168", - "format": 1 - }, - { - "name": "plugins/modules/ipa_hbacrule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8350663990ec7b9b46879f317760e64e9eb9ad080170f8a3ab66f26022623cd5", - "format": 1 - }, - { - "name": "plugins/modules/ipa_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1672d2a59433c0c823dde1d227c7d78caaf492f981d55c6333ba950ba298907c", - "format": 1 - }, - { - "name": "plugins/modules/ipa_hostgroup.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae6569872367a3b15727facea24ff4322cdf35512b1dcd8c4889997943eeb1d8", - "format": 1 - }, - { - "name": "plugins/modules/ipa_otpconfig.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcd17661ce19b040683bbecd506bdb2ec5ed2909c20d71c0a814bb4f05fee345", - "format": 1 - }, - { - "name": "plugins/modules/ipa_otptoken.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ffaa1a58c973d8794d9a1797bd75bccbae783699e1ea87d4bbb7b3ed434d72d4", - "format": 1 - }, - { - "name": "plugins/modules/ipa_pwpolicy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "91f450bc4c6329e67cdf920e7f8499ffb7d27975b0a548ae2110354ed5e2e281", - "format": 1 - }, - { - "name": "plugins/modules/ipa_role.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "24e469a9d45178e0fbdfb4635f525640cd1033ec559f45978e4ba7cc42fb95c6", - "format": 1 - }, - { - "name": "plugins/modules/ipa_service.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3177e872cdf023c8a7e8bd65bd09e2ac102b2c3565c40ee5dc9d8c0fd8ddfcd6", - "format": 1 - }, - { - "name": "plugins/modules/ipa_subca.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "932c8bd910f72a6fd20831704f96358bfd3b96e94ff8346a09a5c401a27087b8", - "format": 1 - }, - { - "name": "plugins/modules/ipa_sudocmd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "58d95fc267fc9d319ff05df6aaab1fb39df187d48bed52d497d92a30c54750ff", - "format": 1 - }, - { - "name": "plugins/modules/ipa_sudocmdgroup.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8fbc39a66b0356ec18f8468789e6d4ffb5a1fae4f0e6d68e8837821d2c138f9", - "format": 1 - }, - { - "name": "plugins/modules/ipa_sudorule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "15ee194ba2afa0982721aed91fdc69f93aee33b45af426efea615e3a03016f51", - "format": 1 - }, - { - "name": "plugins/modules/ipa_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97c135b60e1aca5fc78d7af59cbf5f5dbe14b0ccd93951bc10450698596c1aee", - "format": 1 - }, - { - "name": "plugins/modules/ipa_vault.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2ee238e7dab861eec17312d74cd513b493ec69b41e0d225501c8668d61837d2", - "format": 1 - }, - { - "name": "plugins/modules/ipify_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a3cfe7e782b99e108e034ad45b38f3a686bd057c13a405e13b4082c9d4655ba8", - "format": 1 - }, - { - "name": "plugins/modules/ipinfoio_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ffefdf9402a767ea1aa17675b8be1d868d68e71ef5292b26ea0266a856914208", - "format": 1 - }, - { - "name": "plugins/modules/ipmi_boot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"32bc6fd22d5a4705022af7af389209a8db051bd7994c24e233261bc8188234b3", - "format": 1 - }, - { - "name": "plugins/modules/ipmi_power.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad505007f78f7588bc403a75c522ef4ff75de4b7acfdee4dfbce33aa29713e26", - "format": 1 - }, - { - "name": "plugins/modules/iptables_state.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06358c739fcc70ba79d43af924c0f35a6920d8c5bc4292c14f96dd5870b8d4f7", - "format": 1 - }, - { - "name": "plugins/modules/ipwcli_dns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27f69f073ce4bd49b82bee81a74f81650a89517936b723a1641f203c281ac406", - "format": 1 - }, - { - "name": "plugins/modules/irc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5056a0944304be0cb4585231a68496ecfc2df86c3013ba1b398a17d73ece48c9", - "format": 1 - }, - { - "name": "plugins/modules/iso_create.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e22d344094cca4e10a77f281172b99e2ff51c71d16f63db2088d4cb5cca1dcc0", - "format": 1 - }, - { - "name": "plugins/modules/iso_extract.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "45e148bea9a28b93070734fe860f594c56b645deecd5799fcea67e8ac6c8d0e2", - "format": 1 - }, - { - "name": "plugins/modules/jabber.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "38e25af68e39cb333fe7d46308e6798e9884c5df4feb3d99a9b5c55e8a264709", - "format": 1 - }, - { - "name": "plugins/modules/java_cert.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5c40619fd173dfc758e1dbe6ad2083a924a6b138592fb98244b3d7a152dbbb54", - "format": 1 - }, - { - "name": "plugins/modules/java_keystore.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f2b9a344962a24cc2754aa948d60b383fbb21dfb7be36fb4cf2582fdfd896cd7", - "format": 1 - }, - { - "name": "plugins/modules/jboss.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "413a5203f4d159144142272b5e494f10d032d589d31b0d5167b60ab0e5d40664", - "format": 1 - }, - { - "name": "plugins/modules/jenkins_build.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a70f3860a8a4adf2ab17cc214be4812d8e72fae7ba2a748fbbbe9bb9755178b", - "format": 1 - }, - { - "name": "plugins/modules/jenkins_job.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "289f71c98eae7a1138cb3b922f1b7a431d3cf593ef838ff7f152c5ff60839a28", - "format": 1 - }, - { - "name": "plugins/modules/jenkins_job_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb90242a9999203cb2fa1d6af3e9a8c54ad57530e91aa338f00cee8fd7a4b32e", - "format": 1 - }, - { - "name": "plugins/modules/jenkins_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9f36ba039a959f4ab537e6736021dbb68c50ed10e7ee3eaad03307c5726155e3", - "format": 1 - }, - { - "name": "plugins/modules/jenkins_script.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "194b41bc5b511c44e15b770526dcb63625ec530b963e650343467f12b5a083ee", - "format": 1 - }, - { - "name": "plugins/modules/jira.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "072dfce83798a6ca7fb0c0395e8d8168ca28b140857ef73687bcfc04ebe00941", - "format": 1 - }, - { - "name": "plugins/modules/kernel_blacklist.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "22cb952a459ea253cfd9eaf5d6612dabe02cf670385d9a95e0ad8212b8496b1c", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_authentication.py", - "ftype": 
"file", - "chksum_type": "sha256", - "chksum_sha256": "9c90b1d14c16a6a61e114fcf81cecc8a37c0205d45328b3a2d37e4c26f89bbd1", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_client.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6afcc0997e09859e999b6988fc8313c2b6ab6881593c32202caffb9a00d4e8d9", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_client_rolemapping.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "591f181bff4630f8102b105189ff5b3a13de126520d1d28def344d175527979b", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_clientscope.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5afc1453d8f5360849ee0c3290c0c838f0aada90e1812928e77a1b1e7a5ffd18", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_clienttemplate.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c950ef71abd6035f3861bc568f993b414bf1a24e163c7f486ae529ac5a92cb24", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49d81d24c71674584f1a762d4db1f73d7a13ba78fc367f3961e6e2cafe0c5329", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_identity_provider.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2d458b33b61e2972f529be3fc2b9818bc0bb9511fd2ad1833b8d0ee11032261e", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_realm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6ddd98908cb2d26b7a3627e563b5e8b26335e23d6f8cb7d4675399dc891dd19a", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_realm_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dd2ffd0fbe413e17ef575a432a2ce8d251d3d634f5dcaaa0b70dfd20d2ba22b1", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_role.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad5b8b8c78cf44c6309e19858709eea202cb2a8f20f27e85fc3ea9260bd1b80a", - "format": 1 - }, - { - "name": "plugins/modules/keycloak_user_federation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "100992e28832d1fea678013004dbc8400871bba27af2426c2f240b0eaf4da03e", - "format": 1 - }, - { - "name": "plugins/modules/kibana_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f9ecdf864136ffaeb96c2239570ef3de82852d38cc6d522cb801590c62d4a07a", - "format": 1 - }, - { - "name": "plugins/modules/launchd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "287f7a5a7c8d859038ca8c15e7d221a1bce7c56b02942260f135b52229e177b0", - "format": 1 - }, - { - "name": "plugins/modules/layman.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "836e062d867c45bb523e37edfc3cf6b6b9b94700d994f1755d78b706cf3f6bd0", - "format": 1 - }, - { - "name": "plugins/modules/lbu.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7471d902ef679d8cc8dbeb52b2f737758d696777c83c36332214a727ab7bf1dc", - "format": 1 - }, - { - "name": "plugins/modules/ldap_attrs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "26070ca9bf3bfd37884672ad9335c2a7706298645e84bac4c259bdaab4269f73", - "format": 1 - }, - { - "name": "plugins/modules/ldap_entry.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7c1beee28d7661cce71496558a7a72f3afc3450e92bd5da44c5561192bf34853", - "format": 1 - }, - { - "name": "plugins/modules/ldap_passwd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"6ba81db2b15e61479f3621ea0f9c1ee360a6938388349c842ee7cc39d4affaac", - "format": 1 - }, - { - "name": "plugins/modules/ldap_search.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27ace47cfda1f029f3fd0f87e80d19d4170df442a2da819adaf29c169e86c933", - "format": 1 - }, - { - "name": "plugins/modules/librato_annotation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d9f41d406bfe62d78ad1a042c78019c6fd4df50632213dd5a2d619a2e2bcc1ba", - "format": 1 - }, - { - "name": "plugins/modules/linode.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "167488e841b7c5636e0c1695d689ae29de74d3dc3d33e6bcb4001fb0a680f8fa", - "format": 1 - }, - { - "name": "plugins/modules/linode_v4.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d1484d4125d34af5990398d36e479a00da32dd318259f2c686e315503124940c", - "format": 1 - }, - { - "name": "plugins/modules/listen_ports_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5966c7c49a2850b1c13757899a6bd5443a30319f0b6f2628077662fd703df5b5", - "format": 1 - }, - { - "name": "plugins/modules/lldp.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0bebe90d2f24144019108f71e7dedb4ed60ec93abe5e96fce73196192de34afa", - "format": 1 - }, - { - "name": "plugins/modules/locale_gen.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d79413b262062855f9e4d97f7fefebbf5f18504e8d36da6496f20a0626c7b8be", - "format": 1 - }, - { - "name": "plugins/modules/logentries.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "39eda48181ea6b93f08876a2f9db6b3c22693d848dbb07d6f6592a8adda50152", - "format": 1 - }, - { - "name": "plugins/modules/logentries_msg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "34982c5c0e9aef4d724a068cc3bbb34df2d7e9757d7d2ed620990124d64b9a84", - "format": 1 - }, - { - "name": "plugins/modules/logstash_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d84f5ccd70f2dfdfb0f306ed675920972d332cb07b9d1f7997ee9eb16b6dd0d", - "format": 1 - }, - { - "name": "plugins/modules/lvg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a797ac328651f2c55e0e3f4d09629095014390bd99b82971aa1fced50249177f", - "format": 1 - }, - { - "name": "plugins/modules/lvol.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "faa2fddec92f0bebc7a4536cb716748cadb99d57be46e04faf4f14cb43958e86", - "format": 1 - }, - { - "name": "plugins/modules/lxc_container.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9479e4e14d7c49ddd745eb4ccbafc171fd89db2bad96b711e74dfcb457ca111d", - "format": 1 - }, - { - "name": "plugins/modules/lxca_cmms.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "74ad7330003cfce91c50347b358bea005a2616da70aff5a757bcdd714a3f86a7", - "format": 1 - }, - { - "name": "plugins/modules/lxca_nodes.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "82e905a3d21b63b40414f3ec63dcbd578743c38cf62865ddbe84a5dabb8ec622", - "format": 1 - }, - { - "name": "plugins/modules/lxd_container.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f9dcc2405aff8a396a200b4a8ad4d9321553631966ddeed9c0fb1aee7f4ca94", - "format": 1 - }, - { - "name": "plugins/modules/lxd_profile.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc2d195be2a466ba04309725e6b43fff6933ee7fd979fb7be890bbdd7451d55e", - "format": 1 - }, - { - "name": "plugins/modules/macports.py", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "dbd71696e4f6e58f8d67117c301c32ee210e6765f6b4f7a2a966b64cba91cd16", - "format": 1 - }, - { - "name": "plugins/modules/mail.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d321469472ef8dbd1a0c0c06b67c4213df7a11d487ae18b8962ab1ce7302d36e", - "format": 1 - }, - { - "name": "plugins/modules/make.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b119a10b4ef68686d49cfad00d5c3f4cfec954bce9f86dacbd5011fe2a746b9c", - "format": 1 - }, - { - "name": "plugins/modules/manageiq_alert_profiles.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ddbb9e06f40e750fccf055a42d03a1a80b45bd238d8d4558916c849940b73903", - "format": 1 - }, - { - "name": "plugins/modules/manageiq_alerts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3410230671e4ca67fb49d62280309a70c8e272ed44b063aa133b9e906b5d9f74", - "format": 1 - }, - { - "name": "plugins/modules/manageiq_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ab64599f102c1cbc693aa6a963bfdd0890cbe5c9a556bbb95b4a085bbb354421", - "format": 1 - }, - { - "name": "plugins/modules/manageiq_policies.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "583c115fed4980ab0dd6b7beaf97b8779c5976ed5f212cea213b886f08ea2fbe", - "format": 1 - }, - { - "name": "plugins/modules/manageiq_provider.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f229203632039bdf0e89ee52305065bf2038e8d934a94ae293012da52feda470", - "format": 1 - }, - { - "name": "plugins/modules/manageiq_tags.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ace512b173524ed7af89882fe3912511f1138a58a8ef9f426c56226ce8e120fd", - "format": 1 - }, - { - "name": "plugins/modules/manageiq_tenant.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "99d5ff3a9cc80ba2cb52ac6bcdde27a41e8993d355bae1eea34bf9659e0c7cb0", - "format": 1 - }, - { - "name": "plugins/modules/manageiq_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c9c425603e1e88919c2d9245030f2f02c3866337aa4e81eb702dd003d45069c0", - "format": 1 - }, - { - "name": "plugins/modules/mas.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7346067aa024a97e1fa6c3b2bc55a6eb7469b2eea9c8b69daf179232210248dc", - "format": 1 - }, - { - "name": "plugins/modules/matrix.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49291a2a57c72bea087e2afffade0f7f083deb196f8e32dd6d79955bb5b6116a", - "format": 1 - }, - { - "name": "plugins/modules/mattermost.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4ca0cd2ff4e27e91ffa8542531dd77413443690721b78e468d723e3c85278db", - "format": 1 - }, - { - "name": "plugins/modules/maven_artifact.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9093a95b922bf4c93af8d371f23f6ec650bc04cb139cbbb3ade69d50b050d5d6", - "format": 1 - }, - { - "name": "plugins/modules/memset_dns_reload.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b84a25907522e6ce4bb42500d5a17d4d532da3de5a6d640fd4fb33a7adb147a3", - "format": 1 - }, - { - "name": "plugins/modules/memset_memstore_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cfa529765f7db308a617550e52b56d21ab49e45003f27ebaa9771b78392abcc0", - "format": 1 - }, - { - "name": "plugins/modules/memset_server_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6817c961286632c4ec868845cb3eb62f5095fd7c48a98dad1678071ab08cec28", - 
"format": 1 - }, - { - "name": "plugins/modules/memset_zone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a5b2527e6602a6e9533c842cf944b71be146787a9ab908eca03de3d97ab6cc0", - "format": 1 - }, - { - "name": "plugins/modules/memset_zone_domain.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "90d015499749fd99206a3f5e435b8bb3c59f971689f33024871a2b18125749c2", - "format": 1 - }, - { - "name": "plugins/modules/memset_zone_record.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0db0abd59574ef77493cc31edd1adf8d644740c6968352f94e58a60ea01534a0", - "format": 1 - }, - { - "name": "plugins/modules/mksysb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f4d453b498fb00531d86635f21b89e9da427d17788a8dffd624a7eef2d64260f", - "format": 1 - }, - { - "name": "plugins/modules/modprobe.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3d587d82af8364836d095369488fd76b90dea4f4bf068ac96984f50302fc7228", - "format": 1 - }, - { - "name": "plugins/modules/monit.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f0e631c78c8748e568fbc1624ac2831861087b07f88cac56cd995602aeb3fb89", - "format": 1 - }, - { - "name": "plugins/modules/mqtt.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc3caa21d09f3103a4c21cb7719ed69522760f9221b536e79ad9f9cc52470d8a", - "format": 1 - }, - { - "name": "plugins/modules/mssql_db.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10836be6d1f0c2d46a5ad956f66a98f0ee983de1660c462d3220d377a14ce6c2", - "format": 1 - }, - { - "name": "plugins/modules/mssql_script.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fce6238160aaf08763818017d8bd5a211bf2dd8c478daecaa0584166011d58b6", - "format": 1 - }, - { - "name": "plugins/modules/nagios.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8f3d329e518de7d3efb7cc6b8d96dd17f420a22134f61012b605e579dd365a7e", - "format": 1 - }, - { - "name": "plugins/modules/netcup_dns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "17d6af51c3f484d8415565c30657315387fe7b669e3f7646aa1f5b9ffa444619", - "format": 1 - }, - { - "name": "plugins/modules/newrelic_deployment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5bab501cf9754d7a6c46ae2977fec718592d45efae4d4cd5a29652e6f76bf33d", - "format": 1 - }, - { - "name": "plugins/modules/nexmo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "944a2d977cdaf55b8c53861b2ac13ba4808e3e49429be8dea75b38ec028d2b18", - "format": 1 - }, - { - "name": "plugins/modules/nginx_status_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3be0b85c00ec846e372cd74d28bef34f32211231f6c8cf45803285ff76320d39", - "format": 1 - }, - { - "name": "plugins/modules/nictagadm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "32df37987dc72376f00e17b852b236cb78a6827eddad3459fa8f022eb331494b", - "format": 1 - }, - { - "name": "plugins/modules/nmcli.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e585180782651197b35c000a62b28c94f599beea53c963b4b44a4a4733b9e833", - "format": 1 - }, - { - "name": "plugins/modules/nomad_job.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f340d59640dbfc609d17914eaae66d0abb75aed40548448b92e88b3070c04064", - "format": 1 - }, - { - "name": "plugins/modules/nomad_job_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"9fe39694e1781829ce8bd562b30f040127f5e1e2d7a977c82db3202fe0b00352", - "format": 1 - }, - { - "name": "plugins/modules/nosh.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b98560dd3abfba1dc2fe078a56a4eb93bdcb24af42ef6ee70c413dc7f1f9df3f", - "format": 1 - }, - { - "name": "plugins/modules/npm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2ad403903ddfdb432279a0c91640d2bccc6f9ff4fc017f865f144d0cf12c3fa7", - "format": 1 - }, - { - "name": "plugins/modules/nsupdate.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3ff24f4b701c08dd89733f207803d8e05e37b0ea0d40ea00f3c2b406c94eddb7", - "format": 1 - }, - { - "name": "plugins/modules/oci_vcn.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f40472a5d3fa83672bee22b25f4bb8cd5dc058ffbc68fdd3cac95099e8be9029", - "format": 1 - }, - { - "name": "plugins/modules/odbc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1a07ed4cd1edfc030bd2bc888c365b50d44955cb82d55a69564f524c42a6591d", - "format": 1 - }, - { - "name": "plugins/modules/office_365_connector_card.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ca2802d019e153833f903a044a08c233555cc5e7476446c6df780b23995bd26a", - "format": 1 - }, - { - "name": "plugins/modules/ohai.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4272be634bd89295c956ff2215715a967d299b5d1173048d0513cb45dc1f5f9", - "format": 1 - }, - { - "name": "plugins/modules/omapi_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "32824ddf8d839bdad9decf1161bcee7301af665604be924c98b3378e13315e12", - "format": 1 - }, - { - "name": "plugins/modules/one_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27fc10fab8637c26999d160cd0a07a6d2785d0884c0ddf6dd64b9167cbe261a2", - "format": 1 - }, - { - "name": "plugins/modules/one_image.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc5f0a799258a85e6580bb80e5853fe7f17f64d2baa149eb558994f968e62aeb", - "format": 1 - }, - { - "name": "plugins/modules/one_image_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "230859d81cd0cfd8aa3495a6f19de66dc73995a56cd2a7c44fc975c3de94a24e", - "format": 1 - }, - { - "name": "plugins/modules/one_service.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b8800ee2c709981d0fcc213975fa886aa4113b9d7b80846458ddfffd91d75420", - "format": 1 - }, - { - "name": "plugins/modules/one_template.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aa8c26db525d6ce3ea088ab7f104ffbe900969c5fef2253b11137ec3bfa76c8f", - "format": 1 - }, - { - "name": "plugins/modules/one_vm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4c2832ad3bd5c28b0c269539286f52c3f0492a52322ca9148335f63b5aac8f4f", - "format": 1 - }, - { - "name": "plugins/modules/oneandone_firewall_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00bef2b89385e4be0273d6054adc6fcaf48909c8ed439860e4623bef5ea9a262", - "format": 1 - }, - { - "name": "plugins/modules/oneandone_load_balancer.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6c58464049476dc05439d1b53b4cc76c1bc2efe57ef978e96250b227ad6dabf7", - "format": 1 - }, - { - "name": "plugins/modules/oneandone_monitoring_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f43e8dfe07f728583ce6162b1a5981a867bc80ee36577a12c03a330d0c9ede54", - "format": 1 - }, - { - "name": 
"plugins/modules/oneandone_private_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "72d4a6199f1720039795746a96b49e65d755fa00ba4a2a2925abdbfd942927fb", - "format": 1 - }, - { - "name": "plugins/modules/oneandone_public_ip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f1621773c8720995326fce8e1c59c4c81c82b32ce86aa7f254bdbcea05ff29c3", - "format": 1 - }, - { - "name": "plugins/modules/oneandone_server.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5c210342197346d900dfdd87c9078de8ced7247b82abd4e0ba56a47046729516", - "format": 1 - }, - { - "name": "plugins/modules/onepassword_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d0e2a34b5efebec54d9dce104527972c13fce6c7e04ef25220a8073f4d385d35", - "format": 1 - }, - { - "name": "plugins/modules/oneview_datacenter_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "355d4c6ef338dcf618383018bb1b7a4dff56e8c01f4241a6ddb28b58fa98f4a1", - "format": 1 - }, - { - "name": "plugins/modules/oneview_enclosure_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ba63e68b4e2ce3fbe7cb6e3884ce7f070f6dfdfc4f21ab8f6ccecf32bf4f55db", - "format": 1 - }, - { - "name": "plugins/modules/oneview_ethernet_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2d4ccac855870076ac2e5852e5aba82722d56d161317910c65f0144c9888bce", - "format": 1 - }, - { - "name": "plugins/modules/oneview_ethernet_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b9b15514fd1fc3d8f91b83313acddc8dba8063fdc160c015ca0ac326841d3cd6", - "format": 1 - }, - { - "name": "plugins/modules/oneview_fc_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3669b6c65a3689dae16737839dccbbe509725ae75f52c55c2bcc935decef6ebd", - "format": 1 - }, - { - "name": "plugins/modules/oneview_fc_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8a59e9a708eb32e0bc67eca344d458f20171812bb765f54069e707817d32f3a3", - "format": 1 - }, - { - "name": "plugins/modules/oneview_fcoe_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6afddbe7fa11896de1506c9fe82f234b36ca9640483f8c9247e698981bed83ed", - "format": 1 - }, - { - "name": "plugins/modules/oneview_fcoe_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a89dc5f2cdc9e48ab64afda2958b7dfe0de623bd09ece5d90309f96c5c82f02a", - "format": 1 - }, - { - "name": "plugins/modules/oneview_logical_interconnect_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8ede8042b1abfffb2b7063e081ab962eeddc3462ba9498c5f777ba7b17aeb79", - "format": 1 - }, - { - "name": "plugins/modules/oneview_logical_interconnect_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2bfeeb09917fa930055ad91ab23dfcc98cbb1c638c83fb2a484326527541c902", - "format": 1 - }, - { - "name": "plugins/modules/oneview_network_set.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a2d0b3c12e770373a5ae9dd4e30e20e9199dd5882cce2ea99b8e132e0d73db4d", - "format": 1 - }, - { - "name": "plugins/modules/oneview_network_set_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6ae6c0631e08a394570f300600d4fc4c667e11a0c8c01b52a00b9b73e6be1824", - "format": 1 - }, - { - "name": "plugins/modules/oneview_san_manager.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"6f1b301a7bef55541938d21ee1b2dd59d86c8b4fdc7a7ec29c2b66f30afd0e22", - "format": 1 - }, - { - "name": "plugins/modules/oneview_san_manager_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4d0cc20490ea3903961f2ee4ca7c39bae0c3f2935fd71574fa36a62700283a09", - "format": 1 - }, - { - "name": "plugins/modules/online_server_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "660ccee70609be58fdd563f516002d9f966f665367b9033b863572a352e2793f", - "format": 1 - }, - { - "name": "plugins/modules/online_user_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d17d3d216d502dedc3ce76ac76a9037cea71cca92b996125c376581d6c5fc83", - "format": 1 - }, - { - "name": "plugins/modules/open_iscsi.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "307fc84c58937372a867cbf944d16e3a0606ea44e6699f5782c49c64f3957eda", - "format": 1 - }, - { - "name": "plugins/modules/openbsd_pkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9f9274e283af531ea1604d2231d456b443ca118638c24387c285e51af75bb475", - "format": 1 - }, - { - "name": "plugins/modules/opendj_backendprop.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e45d6e5a6145f58dec874da17714d239170c25aa3d6b6bed4e7ab5d45aa92e9f", - "format": 1 - }, - { - "name": "plugins/modules/openwrt_init.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "55836f6f5d1311011d3184178e63629e7b5a5bc28be88818944e5f8ef9ede13b", - "format": 1 - }, - { - "name": "plugins/modules/opkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e456e9b6d5a6760dd77954c9c35a50524344c6f381b69a5b1e278a2b51fff048", - "format": 1 - }, - { - "name": "plugins/modules/osx_defaults.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "91214ca6596b68554a16c909bb3e5d232b74218b55b9207102ed672ed70b14f6", - "format": 1 - }, - { - "name": "plugins/modules/ovh_ip_failover.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10becd6c09fb98b6e8ed7838f7669e233249d51f8537aef736257b2a7ab62d69", - "format": 1 - }, - { - "name": "plugins/modules/ovh_ip_loadbalancing_backend.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f8af6017776a3c0e311a963f790f75705c130f2cfdb3f59e9b090d496d192ae0", - "format": 1 - }, - { - "name": "plugins/modules/ovh_monthly_billing.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "921ff4d415e12a6ddbefc4a19a2d8807a9d7a3b7328c474fca5be64c59db55e6", - "format": 1 - }, - { - "name": "plugins/modules/pacemaker_cluster.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4386ec559c0dd166cb6b6bf4b2f43f3368c2da231653b3f4027d64fb921b1e48", - "format": 1 - }, - { - "name": "plugins/modules/packet_device.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "73d6fe85e58d6598d3c8f264f530ff774991bd76e0cdb84ec521e2b894ec6411", - "format": 1 - }, - { - "name": "plugins/modules/packet_ip_subnet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3866a31a3e100c8615ae771a933061ead29662d1027b48c3584a5b1097f81b2d", - "format": 1 - }, - { - "name": "plugins/modules/packet_project.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fa9a3e9f17818d2a1485e7ada11b23fff4e8b6c375f805f45a7f57681d0c7a6b", - "format": 1 - }, - { - "name": "plugins/modules/packet_sshkey.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e16a15dca05b676b606f42d23289dd512a7d465f269af8a60b96839cb19709be", - "format": 1 
- }, - { - "name": "plugins/modules/packet_volume.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2c86c31c6ea2c93dcc429b090da7bb20f035a1e21b38ed7010b40cde5fff3113", - "format": 1 - }, - { - "name": "plugins/modules/packet_volume_attachment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ee069291be7657c0fbe528d35cbdf8566a8c558a108ad5c6de8af1e15f3c8175", - "format": 1 - }, - { - "name": "plugins/modules/pacman.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0632694bbe9620826447c3841d4581e718395b052c324c821ef261662980d898", - "format": 1 - }, - { - "name": "plugins/modules/pacman_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ed012d9d887cdf7f21196040f817b2831ee72056f9ce9a9cf52b622547a760c1", - "format": 1 - }, - { - "name": "plugins/modules/pagerduty.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cafe39cf6372187f9c3ab1aa1caedbb31e329474f46662be6dab7247c8db3e10", - "format": 1 - }, - { - "name": "plugins/modules/pagerduty_alert.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c816f9a62a0c0ba8c520986f4918945877a7e214de0693da2b444e3550a79419", - "format": 1 - }, - { - "name": "plugins/modules/pagerduty_change.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7f8b9d10f9edd7c2a7c896a660f920faa975d680ed799eb738ec7277205e748a", - "format": 1 - }, - { - "name": "plugins/modules/pagerduty_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "afe68c52a1fee0a441f79308f3e3f8fb296d9e5193bf74cb10b7a611e2a90c5e", - "format": 1 - }, - { - "name": "plugins/modules/pam_limits.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "87cc82831d55468a2c0d6d86970417652f0b6403b5f9c50ca6bb6d2e5560a294", - "format": 1 - }, - { - "name": "plugins/modules/pamd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "968da2701d4dcb58bf11fb374bc3ccbbc3060c57ca3881fdf8f6bff30f9a8ad1", - "format": 1 - }, - { - "name": "plugins/modules/parted.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9ed692725bcc6a521bfab3f2fadf1933e99cad99896ab3400c8264306e883e46", - "format": 1 - }, - { - "name": "plugins/modules/pear.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f80210e950fbe7d6db548f027713aec26864be6c579179f44128815410597bf", - "format": 1 - }, - { - "name": "plugins/modules/pids.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc2569182b41b994eba6fe7ff080628813b09e98c7ab70b9c10f236e6f33a01f", - "format": 1 - }, - { - "name": "plugins/modules/pingdom.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "19b5785687a9151584a01ce49b9321d1cb4f4fb9a105e8c53a6e10654b1a38ab", - "format": 1 - }, - { - "name": "plugins/modules/pip_package_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1b88e00fa70e9bd96bf141c4d44a7a282b02009c43faff54a4d9d54c69d137ac", - "format": 1 - }, - { - "name": "plugins/modules/pipx.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57df11bbbf4ae34e6eb934afc6808286721268d74540379d1ab812fadbac296d", - "format": 1 - }, - { - "name": "plugins/modules/pkg5.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e36ce1069607e0608509fc036fb6454af0ede52c3682cb43dea44eedab746729", - "format": 1 - }, - { - "name": "plugins/modules/pkg5_publisher.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"1197f2086a98fe014717bdf3396a4ab17ce600b9867897b9c9a5464b34f626b6", - "format": 1 - }, - { - "name": "plugins/modules/pkgin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcb2759ad7a124939de46ccd21103b3a97d5a9dc027530532a9570cd039eb0d8", - "format": 1 - }, - { - "name": "plugins/modules/pkgng.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e7db8e217bcf87e0eb62e61a650f03a800e323132b8d9c25beaa244f77299510", - "format": 1 - }, - { - "name": "plugins/modules/pkgutil.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "be59c5c6e33732eee6662cca01a92d47c6391221783a8e13d3f3f6fe81c2116a", - "format": 1 - }, - { - "name": "plugins/modules/pmem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "87f561ffee94533db91e813e348569aa7f44c076935e43430268f62a5ead5c0d", - "format": 1 - }, - { - "name": "plugins/modules/portage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef869657263254c0fe13e4b160bbf16ce1f935b79d1c65c522e528f1faff98c2", - "format": 1 - }, - { - "name": "plugins/modules/portinstall.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7f8c255fa388d228c0c2b3e18296ab1f8d9e0ea669241099f8004ec8989b23b2", - "format": 1 - }, - { - "name": "plugins/modules/pritunl_org.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "200240d97abc57f33f1a19342dac1cc7586a35fedb314cc23770567f5af6a5be", - "format": 1 - }, - { - "name": "plugins/modules/pritunl_org_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6b8662b1c14487caf1366ef5e99c84e1b5baeb07f1c7d28d23207a1f3d3c46a7", - "format": 1 - }, - { - "name": "plugins/modules/pritunl_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fa3c63e2d3575ce83371962f14da45413042adcb058eece23edb26b80e4337f5", - "format": 1 - }, - { - "name": "plugins/modules/pritunl_user_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "58e441115653a6326381d3d25bfd37d2a73c52624a67c8432a886baf4ed873dc", - "format": 1 - }, - { - "name": "plugins/modules/profitbricks.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e1035b261ade1c2568e0f93dbb06707388e21429b84cfa7b4493292bdb69cd4e", - "format": 1 - }, - { - "name": "plugins/modules/profitbricks_datacenter.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3ab1693fea48313a4dc8fb165ae2853158e5709343485d309cbe7961d744bb67", - "format": 1 - }, - { - "name": "plugins/modules/profitbricks_nic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4ce736c85be70ad04561b01d6e6f51e5385b31da9780ba8eb99b08e9a3c36267", - "format": 1 - }, - { - "name": "plugins/modules/profitbricks_volume.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d163b85748524327ba48c5a02130375d76d34e6e298c02f486e4f6ab51762430", - "format": 1 - }, - { - "name": "plugins/modules/profitbricks_volume_attachments.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4ccdc52719b66398ea0c39c87936dc3e6c4775a9cb0eccafa15ec5b6ecf37a1b", - "format": 1 - }, - { - "name": "plugins/modules/proxmox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "de0af339bdcdae46787c8155267127026c83906fb5a611d413d48f920f593406", - "format": 1 - }, - { - "name": "plugins/modules/proxmox_domain_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "404732bc66d7699feef8ef40d0b233175dffa595bcbeb0be5d9c5de1be939ffd", - "format": 1 - }, - { - "name": 
"plugins/modules/proxmox_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a1aaeaeb18930ce9fef9191606f7eb3f17d4e4bede11430cc0a50a5b8ccca5e", - "format": 1 - }, - { - "name": "plugins/modules/proxmox_kvm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "345ff443e5a2c4b7d3b639801158a3348f5206e6e7fb819443b7ddb9abf8d79b", - "format": 1 - }, - { - "name": "plugins/modules/proxmox_nic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "82acf570c96c694cdec1e8c1f54e7175d3d05834f88fd4b8c4400583c61b3dae", - "format": 1 - }, - { - "name": "plugins/modules/proxmox_snap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "05ad941a753ca0cbb753aacc9c689b31ea0738f010021d871f04b73d95e3bccf", - "format": 1 - }, - { - "name": "plugins/modules/proxmox_storage_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1dc50691f4b30c6302c87897678574422aec3e1aa21c02725880eca3d6ff1aff", - "format": 1 - }, - { - "name": "plugins/modules/proxmox_tasks_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a0d8964c27196fd1859ab45283fa2d5cc71e2190527a6fd5cd8396acfe1f434c", - "format": 1 - }, - { - "name": "plugins/modules/proxmox_template.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "23b7eaa4514c3321c755bdeb1b4a234251677c0fd0396ed3262dc92ada19ac0d", - "format": 1 - }, - { - "name": "plugins/modules/proxmox_user_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "15179fb8a51fd8634785e7b7a42aab97f83dd4d5d5765eebea3eb31f180286a8", - "format": 1 - }, - { - "name": "plugins/modules/pubnub_blocks.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "50fcf2e14b3f56378ea705af16211c4251d4a5a5122958cd6682fced6c98dccc", - "format": 1 - }, - { - "name": "plugins/modules/pulp_repo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27a10386274c0e0ce4b1898686fadea5811dfd7ad45b5daed757d360a70ba2e0", - "format": 1 - }, - { - "name": "plugins/modules/puppet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b5fa5b7c452ca6ff19a0dec8516667e2afc31f5388fc822a92e20d4c144e2a91", - "format": 1 - }, - { - "name": "plugins/modules/pushbullet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0763b7e2415a71cd93764b56b5a4f8e07431b19f657cdfe5f59b1e8c63b8ddc4", - "format": 1 - }, - { - "name": "plugins/modules/pushover.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8d4b6f7686646e0d44a7ad63811b8c1f69927317c2ce8cea4ff855027355c219", - "format": 1 - }, - { - "name": "plugins/modules/python_requirements_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9fa050aedaedf5dd2693f4443418b780e5efbe06bf332f6b1fd675dec120ac6f", - "format": 1 - }, - { - "name": "plugins/modules/rax.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8e3fbbc102737d0fe16362a643d016bbea5db56591c2be2a1c461f2a8b3d4fc9", - "format": 1 - }, - { - "name": "plugins/modules/rax_cbs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e047fe633ea45e4dd28af0bf2d8ad2a438565d8b846cd0d49354cdd17842996a", - "format": 1 - }, - { - "name": "plugins/modules/rax_cbs_attachments.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2008436cd3bb9269d018c7ca69ffb40a5d21849654d2ce32c77562e548d4dca", - "format": 1 - }, - { - "name": "plugins/modules/rax_cdb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"683f0f1f2b266b1ada2b8be24775212eaaf30be12cc8b635485f19bfc6d9de92", - "format": 1 - }, - { - "name": "plugins/modules/rax_cdb_database.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c6fb0e2ff24073a0640ef83786ca3a648b418c8bba2281c6cecaff69903723e3", - "format": 1 - }, - { - "name": "plugins/modules/rax_cdb_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57e216faeaf3e709dd5b9a357bc46b177c502ed5faa6e05e41072ebfd7fe3995", - "format": 1 - }, - { - "name": "plugins/modules/rax_clb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8c48721843ae0d6888b58b6d4565d22a5be02937f60cbe3f42d39d7c376cb8e4", - "format": 1 - }, - { - "name": "plugins/modules/rax_clb_nodes.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8c9a278d819787913d75be28f443ba31c8c2a108bb63c39352d35cbdb600d067", - "format": 1 - }, - { - "name": "plugins/modules/rax_clb_ssl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2134871c3e02d0529e9f1ca574f24140c8d6f6abeaf8a6ba96c0105b7541e489", - "format": 1 - }, - { - "name": "plugins/modules/rax_dns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ccbac0266e76a9edf1afcf903675822f0677a3f4d6000d729de7cffc4b54677f", - "format": 1 - }, - { - "name": "plugins/modules/rax_dns_record.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d6c8e88e7e4fd6eb2ede8ba0ee9c59083f24204160ae3fddfe6677b036c63491", - "format": 1 - }, - { - "name": "plugins/modules/rax_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6530466f3a66bba556b2ebe9c4b11cf825a50f2aa9cdb9de400030d8f6852bea", - "format": 1 - }, - { - "name": "plugins/modules/rax_files.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b6bbf1c17d93faa948635783ca33f05597f6f69d81b7c3d24c4c417c428782ba", - "format": 1 - }, - { - "name": "plugins/modules/rax_files_objects.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77fa10c9bf0457aef89bbd5c66d373fa59481009907c963dd7e81b7474d76529", - "format": 1 - }, - { - "name": "plugins/modules/rax_identity.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ec5013cf01d52790b1e04c5f4ccb0c7e8ab5b2e2fe67330fcd55ba1c63e1d4dc", - "format": 1 - }, - { - "name": "plugins/modules/rax_keypair.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "db1b2855da2f959529c5b377b2b7c7ea8c4a331c4fe507504d57370218b83fa7", - "format": 1 - }, - { - "name": "plugins/modules/rax_meta.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7431032026660e219c8878da55c4f92f11caa614f4d08b7623c02ce28bd3b59e", - "format": 1 - }, - { - "name": "plugins/modules/rax_mon_alarm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aa69ba0c22d53762a053288d543c44a9170f6301904569b894a2ef9065c00af9", - "format": 1 - }, - { - "name": "plugins/modules/rax_mon_check.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "38726fb34a9b467da5545307324b3086e52a3c149ea371ff82e175986238dc42", - "format": 1 - }, - { - "name": "plugins/modules/rax_mon_entity.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4a5953d1f6359f10d254fcbebcb00d8a635958699ae75291517e7756a226a0e2", - "format": 1 - }, - { - "name": "plugins/modules/rax_mon_notification.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a6119af771292fd53cb0c145c91d0064e726e232da3f369f31d854208d83b5b3", - "format": 1 - }, - { - "name": 
"plugins/modules/rax_mon_notification_plan.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fe003304bbf888f7b68ced5e4dc8348a14d1ae27189b042f71a2855ccc1040fd", - "format": 1 - }, - { - "name": "plugins/modules/rax_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d09efe5dc0a5abc207de3126078da30dddfa08fdd6fe5134c95c17b4c6d21597", - "format": 1 - }, - { - "name": "plugins/modules/rax_queue.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f35abbe7a421095cddd7477be5d6abc598205d1dcaebb9522b39c69cf6e2b7e", - "format": 1 - }, - { - "name": "plugins/modules/rax_scaling_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "de94c72c31698ef52fab96fa5de59a280fa501c39925048c6a82c6117454da9c", - "format": 1 - }, - { - "name": "plugins/modules/rax_scaling_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8dfc1a96c6377f3000222233ffd13ab883bd66ddb706dc3fa1f810a7a4c066a4", - "format": 1 - }, - { - "name": "plugins/modules/read_csv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d62a1f5b65ca81a1ba775829d7adc5e175a776de15e544cf85ea321ded35c145", - "format": 1 - }, - { - "name": "plugins/modules/redfish_command.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "423c2bafbce9538603e607934a6c61cb94d96014b901894a750156f2c6f9134c", - "format": 1 - }, - { - "name": "plugins/modules/redfish_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0b46c6fd282bac3a6a347c25af71a4c9eaab7a54fb019541606824c4ea167e99", - "format": 1 - }, - { - "name": "plugins/modules/redfish_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f9aeb09e5827e46c9b6b4420362d7c27d729672322a10637d66164d5341e980", - "format": 1 - }, - { - "name": "plugins/modules/redhat_subscription.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69c5a89501f2ec7d9cc4dc7ec38941bbbdaa5548d60121bd8734891f5c210d29", - "format": 1 - }, - { - "name": "plugins/modules/redis.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b2d25f0de427359721101190758005d983d3d8f040fcd4a4eeb1453b90e4982b", - "format": 1 - }, - { - "name": "plugins/modules/redis_data.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff6b0f5c3b03c74c23565cea5bc0598d1107145ca22ce05c18f077d2c14546b2", - "format": 1 - }, - { - "name": "plugins/modules/redis_data_incr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f098910c5bfeb5edebd3a6ce2a9156d51c534dce997697f219d2a8eea297a27d", - "format": 1 - }, - { - "name": "plugins/modules/redis_data_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "671fc3bfc6d1b36aa4f2ae686d2d5fc180a1decbd61efe3f03bcada8b29da0a8", - "format": 1 - }, - { - "name": "plugins/modules/redis_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "50a909e9a9cc3d2b74c3146d45a444f24234dca68e399a18474d8fbdae19d5dd", - "format": 1 - }, - { - "name": "plugins/modules/rhevm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ca073abab71f54b5ad451eb6ba62e075f54bbc106251a44ae984c16b60e4496e", - "format": 1 - }, - { - "name": "plugins/modules/rhn_channel.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6753c4f100c54548d9a34cc55191a1dff35e789e3ad60a476eabcb85d6e3a71f", - "format": 1 - }, - { - "name": "plugins/modules/rhn_register.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"3fff78a3b4e686e3e760bbf42691db83540ef06b7d88f28b57223a09f581485d", - "format": 1 - }, - { - "name": "plugins/modules/rhsm_release.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a20574f661bf3bcd1bdd02688ed4112eb7a2b35689427e70f5e455ddad7ec1d4", - "format": 1 - }, - { - "name": "plugins/modules/rhsm_repository.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c868fab9daf9cd10efb1b01f613cdb85848f37596464a67fe777b68a681b47b4", - "format": 1 - }, - { - "name": "plugins/modules/riak.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4c8bf321e77871edc3c0a5c342707a50e9c2571fca0ab5bfd8197c682a28b80", - "format": 1 - }, - { - "name": "plugins/modules/rocketchat.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "952dbea3dbfd46a029b9ad19b7a5f3d7659df608a9346f067563fd98f9e8ce65", - "format": 1 - }, - { - "name": "plugins/modules/rollbar_deployment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d281b9e5f08730d58c9aac003d90b45151f9819eb871dd900e63ab3d882f5998", - "format": 1 - }, - { - "name": "plugins/modules/rpm_ostree_pkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e0538d35acc1c91abd3bdfa76310252f9782693e7328722ca04228100cebfb76", - "format": 1 - }, - { - "name": "plugins/modules/rundeck_acl_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f5d8165b92c6995925b290f7956385d5f58e67db78fc5999a8d9fce2c8631a4", - "format": 1 - }, - { - "name": "plugins/modules/rundeck_job_executions_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "70a72bee59a76399bccced7e6db5b5079df984405f5e8f6c03aa077cf0a3954e", - "format": 1 - }, - { - "name": "plugins/modules/rundeck_job_run.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "11003889632bd0531f924dd291d0e9df1ccad0225e3e252e9dc33a258768c8b1", - "format": 1 - }, - { - "name": "plugins/modules/rundeck_project.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c2c34f541040b892e7f031487104db7ec1b0e1a522817e8308d586f9d503f6f8", - "format": 1 - }, - { - "name": "plugins/modules/runit.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "72f3a3dfab5c5d69e79feb4564374076228b714b842e6606bebdc08317c2d74e", - "format": 1 - }, - { - "name": "plugins/modules/sap_task_list_execute.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b1fe8a9ff6fd21d93aa37a3bb40f875dfae6d25c2d5aedb6580197f77cb75ead", - "format": 1 - }, - { - "name": "plugins/modules/sapcar_extract.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fd7fec63a0695a033b2b637499b1f6ace8dd36bd9656f912632260dbc04ae88d", - "format": 1 - }, - { - "name": "plugins/modules/say.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9427eced754de74bbb015098444c4cee334620980bcf62c4c6f7e687475515e6", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_compute.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8f1883b5813315b1c406b285ce00016aa5312559637765b054126e81d818350", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_database_backup.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcd9a15702a9b326e06210c5c14b402504efae5c23f86242921fe745d321d2a4", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_image_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "da6f412ff90955c2ba5ade197e163dc3c36458c036d36d30b0bee1c96e974e43", - "format": 1 - }, - { 
- "name": "plugins/modules/scaleway_ip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "96e4fa6eb157e3553fedf94669681c529add87cabe8aeab442c9f7173f4f398f", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_ip_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5abf4b2c16ef564f485d7be74882b003c3934ded53fe9115022808d88bd90db1", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_lb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a99b738a4a7baffa0ab13ab5ed273bb0d4a0c4ee84a9121dbc7def22fdade7b9", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_organization_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6e29446afc7823fbf66142c7296c24418538474da1eb6180a4fe3ae6e97f3477", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_private_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f95de713da409754100cd96cee21a007082f2bcc93bcbe5e0cc3b85b0324918", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_security_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "41072a0cee6fb8794ea5214ef95f065b9b1cda8ee36296966c529e867655e27f", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_security_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7fa7d5011ba75d648159310c0c1fc26b573d56c973166591c23044d1e72c5492", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_security_group_rule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "03916e5b092a441d6945ccae89c93968f21083bd551ddb48e9f0b280b9abec7e", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_server_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4c5955d275d799b7879ce024888d3d62288c3e19f377b42bd8e22c4d366915b7", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_snapshot_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6127b39c7c7f02fdd516efe60384f4d0b47165e4fb6ba81b5f96a7e42f559983", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_sshkey.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8b5023d5077dfba435674012a27ea807d9352709feacc2eed6b1e5f86f8e582", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_user_data.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb81200dec6e35c01ca780a99c933f255f35c5ce84a3f3f9a1fb24547f295cb7", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_volume.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49173705942bca167ab4caf077dd0ada20360272352443e1a341e624e2d5e77d", - "format": 1 - }, - { - "name": "plugins/modules/scaleway_volume_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c85b5e98e0b397f944ec775736381d6244c8a983117449366f58df4dd11c49a5", - "format": 1 - }, - { - "name": "plugins/modules/sefcontext.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "be1154ed383b3b642dff0e92276c0943ec2e7a5b875e7f16e78ee5764c1d8283", - "format": 1 - }, - { - "name": "plugins/modules/selinux_permissive.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52a988c4f8aa70cd2734333b75b7ec5977be80c272badca53a60df50f157458d", - "format": 1 - }, - { - "name": "plugins/modules/selogin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7424203ca02499f11893f07191e356ee4bf7a92f8c6c66f3760bb3662756bf38", - "format": 1 - }, - { - "name": 
"plugins/modules/sendgrid.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "74a1a142ea29a5519ab4fe938192638ae79b54f40a957dbb7d2b4e3ac4474b87", - "format": 1 - }, - { - "name": "plugins/modules/sensu_check.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "15aa5b61a60a0c812caf893e14c76f55150fa535edbba58a698fa0b07a95687b", - "format": 1 - }, - { - "name": "plugins/modules/sensu_client.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "658c2f425bd755eca7ff3317d9bc4ae20ab2d4650b8659b9846455a4cf650e84", - "format": 1 - }, - { - "name": "plugins/modules/sensu_handler.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d95a2dcc85c1c9ccb51ef8cd0f6412a841db023dfd3412b47bd8aad17e5608fe", - "format": 1 - }, - { - "name": "plugins/modules/sensu_silence.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae9e6d8b69a746cf8e985499ed73c177abb02fdd13bbd04a501a8f76fff96fbc", - "format": 1 - }, - { - "name": "plugins/modules/sensu_subscription.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "530a5fd15a37728a1fd346f68300ecc4fcf28904c1cf3663875006514f0db31b", - "format": 1 - }, - { - "name": "plugins/modules/seport.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "56ce94a493847ce43ad44e30af4bd87b816feeaa4ce15648828998b34efdb721", - "format": 1 - }, - { - "name": "plugins/modules/serverless.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6ebbb91033f3ec1b0a3635b74a288f037d5ed6297f167b5bc94cdcfebc5dd81c", - "format": 1 - }, - { - "name": "plugins/modules/shutdown.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "02c339648349f7eaa4fc7b64c85ee8c40cfc98cda4c9b97879658efaf889f552", - "format": 1 - }, - { - "name": "plugins/modules/sl_vm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b4ec3201ad6d82903722224f35f39c899ee94d96596ada4b112c658d55d8b76", - "format": 1 - }, - { - "name": "plugins/modules/slack.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e96ef97177e41d87862c20fe5daa14f60230671ba34309b83477fec933c4238c", - "format": 1 - }, - { - "name": "plugins/modules/slackpkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "65d52caf009ae8dc698a49d4fef5ac6644954a6c46a68fd961b0e690ddfdc141", - "format": 1 - }, - { - "name": "plugins/modules/smartos_image_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e9c4e315cdd50c84ede09c145a86eacb98515d36fc87251ce11759d26de30200", - "format": 1 - }, - { - "name": "plugins/modules/snap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "513ff327c2a09f42eaa5a945f0b72fe2e6e17bbdc5491b6875c04eaa8f846b48", - "format": 1 - }, - { - "name": "plugins/modules/snap_alias.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b23129de9e88a07cf2c3d5012cb32ec105622e7dfcdfbcdaf694dcdf92cf518b", - "format": 1 - }, - { - "name": "plugins/modules/snmp_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "76246be2be66921ccb940983d25eef4bf5b8cb2f2b96b8bb3f9971bda482ee68", - "format": 1 - }, - { - "name": "plugins/modules/solaris_zone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "60a77ff20a8d31547321204ecb03e5962a99cb34773e9bb46cf25ecfd0ef52d8", - "format": 1 - }, - { - "name": "plugins/modules/sorcery.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ed8fec8e6c5357a8e0a4d7cf020c253a574f8c239f3371b9604beb90cb0975db", - 
"format": 1 - }, - { - "name": "plugins/modules/spectrum_device.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "883564d265cd219779f52beb177c1eee686445277aec016a0000a9734bb3f426", - "format": 1 - }, - { - "name": "plugins/modules/spectrum_model_attrs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a67e3c3ee88a04add9cd67e38778c14b56e9dec145c843f4cbafa550fd9851a9", - "format": 1 - }, - { - "name": "plugins/modules/spotinst_aws_elastigroup.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f52f9bdf880e006dbdbbe2882289d506e89c50c4d1bad2ffc45706c7fc41eda2", - "format": 1 - }, - { - "name": "plugins/modules/ss_3par_cpg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2be10ff6aa61f598720d6ca0a1668a5ec6033680223fa3d3231192f3c12006ef", - "format": 1 - }, - { - "name": "plugins/modules/ssh_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a1764d656d155306fa1c01f06ae71350613998bab940e036272a702ec2cf7510", - "format": 1 - }, - { - "name": "plugins/modules/stackdriver.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2be5529a5b6f3c9366af6e422fafeea193922831655edd3bf7f7d98c440fb506", - "format": 1 - }, - { - "name": "plugins/modules/stacki_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63b57ef41bb4ffad7bd5def9d9d592e3bf2aecc1b22dc66a303774f3b6b95ef7", - "format": 1 - }, - { - "name": "plugins/modules/statsd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "792e64a34b4d66ef704505a6464ab5d809822c2cf4277662559b3257b023f903", - "format": 1 - }, - { - "name": "plugins/modules/statusio_maintenance.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f86b31e7026fa92e312f3196ff270441d9fe75a5e67886bcc1b8c9e3e8d12459", - "format": 1 - }, - { - "name": "plugins/modules/sudoers.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4b125be575e79d2de7d840aef13ddf5ed40623de0f5e5bc74863e5a09610a5ee", - "format": 1 - }, - { - "name": "plugins/modules/supervisorctl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a130a0e5a2402d2d964a069ae288d1faff9808d48f8b0f4d4a83a9fa55192ba", - "format": 1 - }, - { - "name": "plugins/modules/svc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97cb8133ea514678200f8dd1d4041ce90327486c903143912d7995806c16457a", - "format": 1 - }, - { - "name": "plugins/modules/svr4pkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e6fdff83fa4d867e28b52c26ab42377cb8b793218b68a4d538c06b923a78cfff", - "format": 1 - }, - { - "name": "plugins/modules/swdepot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7cf596e285fbcb98e9bae8ee345d63daa2528c34fd93138d6c9afb77db2f7d8e", - "format": 1 - }, - { - "name": "plugins/modules/swupd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8247ec718e884f51246f84426c2c50ed7a48aac0e7ef97161ce11e3aa62662fd", - "format": 1 - }, - { - "name": "plugins/modules/syslogger.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "abcf172195a8f6b74396dd273e2d9926c0c6bbba773f5949f9565b2cd2aaea07", - "format": 1 - }, - { - "name": "plugins/modules/syspatch.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89c7d7ddd8731028bb3f5ea8426de2b5b8f19c0d2d9a0e6978aa67347be0540e", - "format": 1 - }, - { - "name": "plugins/modules/sysrc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"cd30445b5d09ca12cd4166dd59f204b4be4e0761ac8ddf7dd851a2d5026bcebb", - "format": 1 - }, - { - "name": "plugins/modules/sysupgrade.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c9bab43a8cc9cb85528181f72c9a881e6e53a39755461800aded2b3a27216c8", - "format": 1 - }, - { - "name": "plugins/modules/taiga_issue.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3f0162389f24357b7981000dc718ef8a794b260ef570753703bfa372d593583", - "format": 1 - }, - { - "name": "plugins/modules/telegram.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "166e6d4a5b832d22b024dc9395780a807341ebbb6d5a78726dd40d9f5214fbbb", - "format": 1 - }, - { - "name": "plugins/modules/terraform.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "987e496081df3b156adbf5cb65de3e5e4ff9008b04936272b5b63f8d80c65eda", - "format": 1 - }, - { - "name": "plugins/modules/timezone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f762436db06c2b4085c9421b3e9a2337d1b65e1fce6663cc55e6d2efbe774668", - "format": 1 - }, - { - "name": "plugins/modules/twilio.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bc5913224c8de906d7739278662d6efa7055a88ecc24dd2e568a2c33065b0e23", - "format": 1 - }, - { - "name": "plugins/modules/typetalk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8b0c5a2c18ec40da946914f93655f144d608fcc4737cca258642c44d69245b42", - "format": 1 - }, - { - "name": "plugins/modules/udm_dns_record.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "828fafca6838e91191a827d40961539a3820507d08f82b0cb6dcdaae53d9b9ba", - "format": 1 - }, - { - "name": "plugins/modules/udm_dns_zone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c18407922bc2d77ecfa7e650c0bd7a90eb896fe07e6c9eac191d0e68f63df2e1", - "format": 1 - }, - { - "name": "plugins/modules/udm_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9ee3d7282c96a82bf24b58260e3fef9a376e9c2d768d0dd7c4e1ec648288fefd", - "format": 1 - }, - { - "name": "plugins/modules/udm_share.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b9ed6ebf6e752a6cb101f24248ec3e319f1965287de8a61a638530701a6137e9", - "format": 1 - }, - { - "name": "plugins/modules/udm_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "54edc8d89620ddde5caed48faea3e0341ab0d4dff6605c512d67767468fa49ff", - "format": 1 - }, - { - "name": "plugins/modules/ufw.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f0958a3686ca75540353eddd3148a6e4b19ed9b57bac7e6994e949572dd2a1fd", - "format": 1 - }, - { - "name": "plugins/modules/uptimerobot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b5aa626e5c790d9b21ef75af42ca78551c07e38e3539ce6dcafcd638cfa8d9ff", - "format": 1 - }, - { - "name": "plugins/modules/urpmi.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2533a194a0b36cceeb0ec69d8586cfe12e8f4c7bdf13e22dc68c7dc9d1c8ceec", - "format": 1 - }, - { - "name": "plugins/modules/utm_aaa_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ad7213f9e7d5c8683f0a608a816f02f935bd3aa514be57a18671290391e7a44", - "format": 1 - }, - { - "name": "plugins/modules/utm_aaa_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f7e66c06b83fec400b96810f28ce02f9d7c6c20cec8ebe5e321f163c318d8dd", - "format": 1 - }, - { - "name": "plugins/modules/utm_ca_host_key_cert.py", - "ftype": "file", - "chksum_type": 
"sha256", - "chksum_sha256": "d2c1649b50116c8b150ecdd4ca13c91bc52f49a22a57cd7aaec2d4c6125c0524", - "format": 1 - }, - { - "name": "plugins/modules/utm_ca_host_key_cert_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "826a7d96e136504ae975e591e769dd5fdff2c96b59eaff5535dfeb43fbaf08d5", - "format": 1 - }, - { - "name": "plugins/modules/utm_dns_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fc5c40e788f2cf6dd4e82f618f6f37ea21e3ce497c640c49bfd9ec2ccdf234e0", - "format": 1 - }, - { - "name": "plugins/modules/utm_network_interface_address.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "29d9fe615e9c8b54a8bdac9ca4c4a0436ae3d3cae2972bae73df9fbb071072e5", - "format": 1 - }, - { - "name": "plugins/modules/utm_network_interface_address_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "399fa31a5fc8cfcf1a0f8fd944f7ca139446413e6fff5251083c226bb5274aa7", - "format": 1 - }, - { - "name": "plugins/modules/utm_proxy_auth_profile.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "54ded3e29eec68ce76581b665af3228e58fe76211ffc3a392a890d42eac30289", - "format": 1 - }, - { - "name": "plugins/modules/utm_proxy_exception.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4dd04942dd16dae3c1e1de10712363b8cc67597db2647fc58d3a085c0a5d6e0b", - "format": 1 - }, - { - "name": "plugins/modules/utm_proxy_frontend.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d39c2514f334eace3ce91c284d85afbaa6ce488b6dec69d7cea6689247fee56", - "format": 1 - }, - { - "name": "plugins/modules/utm_proxy_frontend_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "da27864c36b0b1636bb1016f6623d38cc2685d9f1073d9023baf6650e2b5fbc5", - "format": 1 - }, - { - "name": "plugins/modules/utm_proxy_location.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b3f2a4ee29a7fd7a468d7a4feaae37f0ce5d90fc963a91561feae1de5cd21f2", - "format": 1 - }, - { - "name": "plugins/modules/utm_proxy_location_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "af35736343e2510d4ff9dc5ca4a01c3a6a17ae83685ea43381b8ae84190f1050", - "format": 1 - }, - { - "name": "plugins/modules/vdo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89c6c5018638467973eee8012275abf8a5f611a01cc073bc82ce583e52b3639f", - "format": 1 - }, - { - "name": "plugins/modules/vertica_configuration.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff26d24f57fbea1fcf16e64ce0eff1417624bcf5224da566422a6086512a8c19", - "format": 1 - }, - { - "name": "plugins/modules/vertica_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f270fb5c6863524079c13320d7816bb446f48b485e5fda83fba3d76183a70a9", - "format": 1 - }, - { - "name": "plugins/modules/vertica_role.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4127075612f26e1b253766e24f5976861a9f3a985cdfc0150c46bccf394f7ba0", - "format": 1 - }, - { - "name": "plugins/modules/vertica_schema.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69e66027dd2f802db9e894e4e45ba61e8f7324d0439807f06f1e0766508e371c", - "format": 1 - }, - { - "name": "plugins/modules/vertica_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d9c75983531caeba4f82346be16d82e759af99ea6ab5e55253b68cce5919e394", - "format": 1 - }, - { - "name": "plugins/modules/vexata_eg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"fac270b3db28c9f8b6d24d299e753c80f9d251dbbdcb386a319097c17219a80d", - "format": 1 - }, - { - "name": "plugins/modules/vexata_volume.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a6377d7306fb5a11f52aaa9a89cff909e8028a7cef71959eb6a7135ba1561d4a", - "format": 1 - }, - { - "name": "plugins/modules/vmadm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0971c82f4b903c4b66b249f6c334ca5e8807d8a6e331259df1e6b3a857b3cf79", - "format": 1 - }, - { - "name": "plugins/modules/wakeonlan.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eaedb6adc85510f03ea6424a673ef862122db281b83f75d3f66668652443fec8", - "format": 1 - }, - { - "name": "plugins/modules/webfaction_app.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f7255baa5e5a1f4ce32ccf47ba28e520f8216cc456e76ca4e2f58011db66f55e", - "format": 1 - }, - { - "name": "plugins/modules/webfaction_db.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "33d9e30418f53beef9333cac841481ec7a12104b9b9dd83827509662e983b36a", - "format": 1 - }, - { - "name": "plugins/modules/webfaction_domain.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7b7e7f59ae55508f5fa363376723a14dc3ab3d7823c962eb37f54c7d01381646", - "format": 1 - }, - { - "name": "plugins/modules/webfaction_mailbox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f7bd28802ef9156cb65184bb6b2c890fe68011c50039032b364e411a7fe778cc", - "format": 1 - }, - { - "name": "plugins/modules/webfaction_site.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "251596c3e652ec903f30714b4ed852fbb57ddfeb7a37e11e2189dc2d52a98655", - "format": 1 - }, - { - "name": "plugins/modules/xattr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7e921b2dae03b00730009593599edb959ad3ff62419caeb3cbeaecdd9be9f2c2", - "format": 1 - }, - { - "name": "plugins/modules/xbps.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "089f8b636b6bf7eb741857050bb8f3e105c919e705d561501bb91f9a1301af87", - "format": 1 - }, - { - "name": "plugins/modules/xcc_redfish_command.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "457f980a1ceb9c24d26aa2b7145d26f8902c56a4cbc0ffc7ddaae24670f48741", - "format": 1 - }, - { - "name": "plugins/modules/xenserver_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a5316dbdb00e13930e0b2e5aa05255e7f7166ccb568fda58e4f5b3ef7e9eb7de", - "format": 1 - }, - { - "name": "plugins/modules/xenserver_guest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "75fdda2a572d73b2248badab234d9a4c91caf035be8bbf450652fc567aef5c6b", - "format": 1 - }, - { - "name": "plugins/modules/xenserver_guest_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "55a88716e40077de6218ae32f47d73349dd6993469e02a0c0d867b6638991280", - "format": 1 - }, - { - "name": "plugins/modules/xenserver_guest_powerstate.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "512b73487169482e8887c48e6f3278950736c93a5c2a4c698b149e80217bf270", - "format": 1 - }, - { - "name": "plugins/modules/xfconf.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3e6be01aa8dd20b6a1280caa636ea2321e0ce1635a39ca05517689b94716db9c", - "format": 1 - }, - { - "name": "plugins/modules/xfconf_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a5da7521b9e492203fa819ac907686227c1184a6ccb327c35a3b5e6b59b9e6e", - "format": 1 - }, - { - "name": 
"plugins/modules/xfs_quota.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27825f948b8481607c8829578da78f5b9030677cdf578304491fc9d6ca4f1348", - "format": 1 - }, - { - "name": "plugins/modules/xml.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "20c53e6a1125b9d310540e60133de640668297ff31b91842bdd659ab0155f688", - "format": 1 - }, - { - "name": "plugins/modules/yarn.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d932d7644fb9f5e4a333c1e402b68b485a16e3d14883df4b8f9a1f39442d077d", - "format": 1 - }, - { - "name": "plugins/modules/yum_versionlock.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e9581edf16a8ece5930e0eefd40622ee4e4b453e564d3e40adcdf949ec1257dc", - "format": 1 - }, - { - "name": "plugins/modules/zfs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0d5c3365e12bd96290f24b1ec13e5161e61f505d07110e03ff58195397373516", - "format": 1 - }, - { - "name": "plugins/modules/zfs_delegate_admin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3109f4627ebfb5190204f57294c84ad0d54197c99c3a001b1f69f5291124490f", - "format": 1 - }, - { - "name": "plugins/modules/zfs_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "18a3b38a9f47f4f5112579b819de7d664e0b55db8995743d4eac364579af5e2e", - "format": 1 - }, - { - "name": "plugins/modules/znode.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "09e0d13c350448cb46706cc1106c643d1ede2a94fd54eb4c9bf6bb5a6b36839f", - "format": 1 - }, - { - "name": "plugins/modules/zpool_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e26beb9afe4a1cbd3b2a05eec94c61ee16b586db9985c962f09c76c15f80883c", - "format": 1 - }, - { - "name": "plugins/modules/zypper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4386efd38cb4d2e6b5f6ffd4a4d66265541f6ba78547359833de537095036b1a", - "format": 1 - }, - { - "name": "plugins/modules/zypper_repository.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef4e8074966a7a65e9b22d703beee3f2b6b7aa5b22e28123bdc18d5043f8db88", - "format": 1 - }, - { - "name": "plugins/modules/cloud", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/alicloud", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/alicloud/ali_instance.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6273f052fa89f9ab9a27230eee5064a37333af680e24ba1d5a715ec11e83c980", - "format": 1 - }, - { - "name": "plugins/modules/cloud/alicloud/ali_instance_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "34c5d0b44fc32a43160e9c62290e1afecfe73481f22b9a9ce8b444c4517112de", - "format": 1 - }, - { - "name": "plugins/modules/cloud/atomic", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/atomic/atomic_container.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13baf1b70fda761f06be5d8de58290518bc8707287af37fe1af641284fb504a5", - "format": 1 - }, - { - "name": "plugins/modules/cloud/atomic/atomic_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef3911802c6f970e9014cb8fd849be9df1f8e897876fc9cce03cd66e7d3a2e5f", - "format": 1 - }, - { - "name": "plugins/modules/cloud/atomic/atomic_image.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"dd25dd2258096e58d9d2873a382e9e5f530cd6224d74325c5466a829f9f6c5e2", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_aa_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "767f1e863c93bfe0e8d3bb37d7a029384caec1cf41eebde2c6ce60a864feb5c3", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_alert_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "45e07b52737a3326a3debf36f5d38fc1fa33503b8fd7156f5f1fb19035a8f379", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_blueprint_package.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52d3398cae86c645575a688a7f9dccccbd60b51d69743fdf2e64be70535c75e8", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_firewall_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef30311f37991878811921a4ece22412e4c94e92527e9d93d2f761efbfca658a", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "636a3b3a90bb1d9fd744e2a22f3ad42a6a372df6ffd9f2aef92e606391ecaee7", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_loadbalancer.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "87e5dace3e225dbd78b375a034bf5b582a4af0ba05b9276b1bf92caa61a8f5d5", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_modify_server.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "250d51c8692ee01ef2b75c9da4327adeaf79934aae75a942c45807a66ea9de62", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_publicip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b183d61dc5fb36caf1424935c1915fe087322d608bcfc0211a84b56053e0555e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_server.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6c7b6c85a2f14f4caab7d170ea0204f87428a5116e21eb8dffd4bcee26540111", - "format": 1 - }, - { - "name": "plugins/modules/cloud/centurylink/clc_server_snapshot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8dd73687f3268d52da21504f88fc735fbf4a0761655db9693486a46b24263a16", - "format": 1 - }, - { - "name": "plugins/modules/cloud/dimensiondata", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/dimensiondata/dimensiondata_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4adadccb01c1cef01fe7d330d031c733cf61079bf28f82cab9f260d02355eb8a", - "format": 1 - }, - { - "name": "plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b30817b9ad59ecb496117d3f53cae29c288dc7307f0ea100b7a01f73dfeb998e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/heroku", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/heroku/heroku_collaborator.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a540ae7b336b9ceb5b55d841ae1c8aa86b43da70501a51a7eafd576c59a888fe", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"plugins/modules/cloud/huawei/hwc_ecs_instance.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89845b03caeb5d8bc17443300b889399ae73b4da9df2d1404c1d9c09f042ae8e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_evs_disk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a74a926cd9e503aaebaa3a77d5e80dbba7e42c4c4a92f9c7dbcd147dda363714", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_network_vpc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad8ab2a633dea8a8afe36d610bd108ec2d8455632452935ae7d32b49b9f9cb4d", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_smn_topic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "43f61a1ef273853a04a5a24138bd7f4d716d3892ba456b9d38a352d682fc26d8", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_vpc_eip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4006ecd981645492fe82a37ea0910a40aac3e24e0e1503a046afa52e42e614a1", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d0eca5c552649fd19228928b85cf91670abd2122fd7a6afae49c91f7d84bae03", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_vpc_port.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0981c5ad00e6719986102308ac2745eb5d316fd7e0785ebc236102ad9c987ec7", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_vpc_private_ip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "778aea0f9e96d24c7c51afdf7eb50bdcda5690d2ca1f10511ead89a47c30a116", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_vpc_route.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4369f9a4cfa48a82a66435bf9ebbfcd9a19dd8c91aaf1c5f6684fd33b5c5103e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_vpc_security_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49f9184ecdc9dcc89addc51cd8490746fb3a54089d403f4fb1c64a6f7516f264", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd75294789234ffe193cfa2ff95084fb3edb0de2a42d9a20309db99bab189997", - "format": 1 - }, - { - "name": "plugins/modules/cloud/huawei/hwc_vpc_subnet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3e5ac97a4be19828a95658766474adba0d1b9c4f2bb2dff454cd4bb3aa821480", - "format": 1 - }, - { - "name": "plugins/modules/cloud/linode", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/linode/linode.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "167488e841b7c5636e0c1695d689ae29de74d3dc3d33e6bcb4001fb0a680f8fa", - "format": 1 - }, - { - "name": "plugins/modules/cloud/linode/linode_v4.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d1484d4125d34af5990398d36e479a00da32dd318259f2c686e315503124940c", - "format": 1 - }, - { - "name": "plugins/modules/cloud/lxc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/lxc/lxc_container.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9479e4e14d7c49ddd745eb4ccbafc171fd89db2bad96b711e74dfcb457ca111d", - "format": 1 - }, - { - "name": "plugins/modules/cloud/lxd", - "ftype": 
"dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/lxd/lxd_container.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f9dcc2405aff8a396a200b4a8ad4d9321553631966ddeed9c0fb1aee7f4ca94", - "format": 1 - }, - { - "name": "plugins/modules/cloud/lxd/lxd_profile.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc2d195be2a466ba04309725e6b43fff6933ee7fd979fb7be890bbdd7451d55e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/memset", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/memset/memset_dns_reload.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b84a25907522e6ce4bb42500d5a17d4d532da3de5a6d640fd4fb33a7adb147a3", - "format": 1 - }, - { - "name": "plugins/modules/cloud/memset/memset_memstore_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cfa529765f7db308a617550e52b56d21ab49e45003f27ebaa9771b78392abcc0", - "format": 1 - }, - { - "name": "plugins/modules/cloud/memset/memset_server_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6817c961286632c4ec868845cb3eb62f5095fd7c48a98dad1678071ab08cec28", - "format": 1 - }, - { - "name": "plugins/modules/cloud/memset/memset_zone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a5b2527e6602a6e9533c842cf944b71be146787a9ab908eca03de3d97ab6cc0", - "format": 1 - }, - { - "name": "plugins/modules/cloud/memset/memset_zone_domain.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "90d015499749fd99206a3f5e435b8bb3c59f971689f33024871a2b18125749c2", - "format": 1 - }, - { - "name": "plugins/modules/cloud/memset/memset_zone_record.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0db0abd59574ef77493cc31edd1adf8d644740c6968352f94e58a60ea01534a0", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/cloud_init_data_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a45eaa4abec3de3c7d4f0bc9338ed79308b522c2cca5496671da197901688986", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "de0af339bdcdae46787c8155267127026c83906fb5a611d413d48f920f593406", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_domain_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "404732bc66d7699feef8ef40d0b233175dffa595bcbeb0be5d9c5de1be939ffd", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a1aaeaeb18930ce9fef9191606f7eb3f17d4e4bede11430cc0a50a5b8ccca5e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_kvm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "345ff443e5a2c4b7d3b639801158a3348f5206e6e7fb819443b7ddb9abf8d79b", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_nic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "82acf570c96c694cdec1e8c1f54e7175d3d05834f88fd4b8c4400583c61b3dae", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_snap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "05ad941a753ca0cbb753aacc9c689b31ea0738f010021d871f04b73d95e3bccf", - 
"format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_storage_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1dc50691f4b30c6302c87897678574422aec3e1aa21c02725880eca3d6ff1aff", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_tasks_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a0d8964c27196fd1859ab45283fa2d5cc71e2190527a6fd5cd8396acfe1f434c", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_template.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "23b7eaa4514c3321c755bdeb1b4a234251677c0fd0396ed3262dc92ada19ac0d", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/proxmox_user_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "15179fb8a51fd8634785e7b7a42aab97f83dd4d5d5765eebea3eb31f180286a8", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/rhevm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ca073abab71f54b5ad451eb6ba62e075f54bbc106251a44ae984c16b60e4496e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/serverless.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6ebbb91033f3ec1b0a3635b74a288f037d5ed6297f167b5bc94cdcfebc5dd81c", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/terraform.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "987e496081df3b156adbf5cb65de3e5e4ff9008b04936272b5b63f8d80c65eda", - "format": 1 - }, - { - "name": "plugins/modules/cloud/misc/xenserver_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a5316dbdb00e13930e0b2e5aa05255e7f7166ccb568fda58e4f5b3ef7e9eb7de", - "format": 1 - }, - { - "name": "plugins/modules/cloud/oneandone", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/oneandone/oneandone_firewall_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00bef2b89385e4be0273d6054adc6fcaf48909c8ed439860e4623bef5ea9a262", - "format": 1 - }, - { - "name": "plugins/modules/cloud/oneandone/oneandone_load_balancer.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6c58464049476dc05439d1b53b4cc76c1bc2efe57ef978e96250b227ad6dabf7", - "format": 1 - }, - { - "name": "plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f43e8dfe07f728583ce6162b1a5981a867bc80ee36577a12c03a330d0c9ede54", - "format": 1 - }, - { - "name": "plugins/modules/cloud/oneandone/oneandone_private_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "72d4a6199f1720039795746a96b49e65d755fa00ba4a2a2925abdbfd942927fb", - "format": 1 - }, - { - "name": "plugins/modules/cloud/oneandone/oneandone_public_ip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f1621773c8720995326fce8e1c59c4c81c82b32ce86aa7f254bdbcea05ff29c3", - "format": 1 - }, - { - "name": "plugins/modules/cloud/oneandone/oneandone_server.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5c210342197346d900dfdd87c9078de8ced7247b82abd4e0ba56a47046729516", - "format": 1 - }, - { - "name": "plugins/modules/cloud/online", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/online/online_server_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"660ccee70609be58fdd563f516002d9f966f665367b9033b863572a352e2793f", - "format": 1 - }, - { - "name": "plugins/modules/cloud/online/online_user_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d17d3d216d502dedc3ce76ac76a9037cea71cca92b996125c376581d6c5fc83", - "format": 1 - }, - { - "name": "plugins/modules/cloud/opennebula", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/opennebula/one_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27fc10fab8637c26999d160cd0a07a6d2785d0884c0ddf6dd64b9167cbe261a2", - "format": 1 - }, - { - "name": "plugins/modules/cloud/opennebula/one_image.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc5f0a799258a85e6580bb80e5853fe7f17f64d2baa149eb558994f968e62aeb", - "format": 1 - }, - { - "name": "plugins/modules/cloud/opennebula/one_image_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "230859d81cd0cfd8aa3495a6f19de66dc73995a56cd2a7c44fc975c3de94a24e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/opennebula/one_service.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b8800ee2c709981d0fcc213975fa886aa4113b9d7b80846458ddfffd91d75420", - "format": 1 - }, - { - "name": "plugins/modules/cloud/opennebula/one_template.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aa8c26db525d6ce3ea088ab7f104ffbe900969c5fef2253b11137ec3bfa76c8f", - "format": 1 - }, - { - "name": "plugins/modules/cloud/opennebula/one_vm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4c2832ad3bd5c28b0c269539286f52c3f0492a52322ca9148335f63b5aac8f4f", - "format": 1 - }, - { - "name": "plugins/modules/cloud/oracle", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/oracle/oci_vcn.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f40472a5d3fa83672bee22b25f4bb8cd5dc058ffbc68fdd3cac95099e8be9029", - "format": 1 - }, - { - "name": "plugins/modules/cloud/ovh", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/ovh/ovh_ip_failover.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10becd6c09fb98b6e8ed7838f7669e233249d51f8537aef736257b2a7ab62d69", - "format": 1 - }, - { - "name": "plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f8af6017776a3c0e311a963f790f75705c130f2cfdb3f59e9b090d496d192ae0", - "format": 1 - }, - { - "name": "plugins/modules/cloud/ovh/ovh_monthly_billing.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "921ff4d415e12a6ddbefc4a19a2d8807a9d7a3b7328c474fca5be64c59db55e6", - "format": 1 - }, - { - "name": "plugins/modules/cloud/packet", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/packet/packet_device.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "73d6fe85e58d6598d3c8f264f530ff774991bd76e0cdb84ec521e2b894ec6411", - "format": 1 - }, - { - "name": "plugins/modules/cloud/packet/packet_ip_subnet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3866a31a3e100c8615ae771a933061ead29662d1027b48c3584a5b1097f81b2d", - "format": 1 - }, - { - "name": "plugins/modules/cloud/packet/packet_project.py", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "fa9a3e9f17818d2a1485e7ada11b23fff4e8b6c375f805f45a7f57681d0c7a6b", - "format": 1 - }, - { - "name": "plugins/modules/cloud/packet/packet_sshkey.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e16a15dca05b676b606f42d23289dd512a7d465f269af8a60b96839cb19709be", - "format": 1 - }, - { - "name": "plugins/modules/cloud/packet/packet_volume.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2c86c31c6ea2c93dcc429b090da7bb20f035a1e21b38ed7010b40cde5fff3113", - "format": 1 - }, - { - "name": "plugins/modules/cloud/packet/packet_volume_attachment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ee069291be7657c0fbe528d35cbdf8566a8c558a108ad5c6de8af1e15f3c8175", - "format": 1 - }, - { - "name": "plugins/modules/cloud/profitbricks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/profitbricks/profitbricks.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e1035b261ade1c2568e0f93dbb06707388e21429b84cfa7b4493292bdb69cd4e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/profitbricks/profitbricks_datacenter.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3ab1693fea48313a4dc8fb165ae2853158e5709343485d309cbe7961d744bb67", - "format": 1 - }, - { - "name": "plugins/modules/cloud/profitbricks/profitbricks_nic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4ce736c85be70ad04561b01d6e6f51e5385b31da9780ba8eb99b08e9a3c36267", - "format": 1 - }, - { - "name": "plugins/modules/cloud/profitbricks/profitbricks_volume.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d163b85748524327ba48c5a02130375d76d34e6e298c02f486e4f6ab51762430", - "format": 1 - }, - { - "name": "plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4ccdc52719b66398ea0c39c87936dc3e6c4775a9cb0eccafa15ec5b6ecf37a1b", - "format": 1 - }, - { - "name": "plugins/modules/cloud/pubnub", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/pubnub/pubnub_blocks.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "50fcf2e14b3f56378ea705af16211c4251d4a5a5122958cd6682fced6c98dccc", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8e3fbbc102737d0fe16362a643d016bbea5db56591c2be2a1c461f2a8b3d4fc9", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_cbs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e047fe633ea45e4dd28af0bf2d8ad2a438565d8b846cd0d49354cdd17842996a", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_cbs_attachments.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2008436cd3bb9269d018c7ca69ffb40a5d21849654d2ce32c77562e548d4dca", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_cdb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "683f0f1f2b266b1ada2b8be24775212eaaf30be12cc8b635485f19bfc6d9de92", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_cdb_database.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"c6fb0e2ff24073a0640ef83786ca3a648b418c8bba2281c6cecaff69903723e3", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_cdb_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57e216faeaf3e709dd5b9a357bc46b177c502ed5faa6e05e41072ebfd7fe3995", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_clb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8c48721843ae0d6888b58b6d4565d22a5be02937f60cbe3f42d39d7c376cb8e4", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_clb_nodes.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8c9a278d819787913d75be28f443ba31c8c2a108bb63c39352d35cbdb600d067", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_clb_ssl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2134871c3e02d0529e9f1ca574f24140c8d6f6abeaf8a6ba96c0105b7541e489", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_dns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ccbac0266e76a9edf1afcf903675822f0677a3f4d6000d729de7cffc4b54677f", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_dns_record.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d6c8e88e7e4fd6eb2ede8ba0ee9c59083f24204160ae3fddfe6677b036c63491", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6530466f3a66bba556b2ebe9c4b11cf825a50f2aa9cdb9de400030d8f6852bea", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_files.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b6bbf1c17d93faa948635783ca33f05597f6f69d81b7c3d24c4c417c428782ba", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_files_objects.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77fa10c9bf0457aef89bbd5c66d373fa59481009907c963dd7e81b7474d76529", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_identity.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ec5013cf01d52790b1e04c5f4ccb0c7e8ab5b2e2fe67330fcd55ba1c63e1d4dc", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_keypair.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "db1b2855da2f959529c5b377b2b7c7ea8c4a331c4fe507504d57370218b83fa7", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_meta.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7431032026660e219c8878da55c4f92f11caa614f4d08b7623c02ce28bd3b59e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_mon_alarm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aa69ba0c22d53762a053288d543c44a9170f6301904569b894a2ef9065c00af9", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_mon_check.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "38726fb34a9b467da5545307324b3086e52a3c149ea371ff82e175986238dc42", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_mon_entity.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4a5953d1f6359f10d254fcbebcb00d8a635958699ae75291517e7756a226a0e2", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_mon_notification.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a6119af771292fd53cb0c145c91d0064e726e232da3f369f31d854208d83b5b3", - "format": 1 - 
}, - { - "name": "plugins/modules/cloud/rackspace/rax_mon_notification_plan.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fe003304bbf888f7b68ced5e4dc8348a14d1ae27189b042f71a2855ccc1040fd", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d09efe5dc0a5abc207de3126078da30dddfa08fdd6fe5134c95c17b4c6d21597", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_queue.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f35abbe7a421095cddd7477be5d6abc598205d1dcaebb9522b39c69cf6e2b7e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_scaling_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "de94c72c31698ef52fab96fa5de59a280fa501c39925048c6a82c6117454da9c", - "format": 1 - }, - { - "name": "plugins/modules/cloud/rackspace/rax_scaling_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8dfc1a96c6377f3000222233ffd13ab883bd66ddb706dc3fa1f810a7a4c066a4", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_compute.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8f1883b5813315b1c406b285ce00016aa5312559637765b054126e81d818350", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_database_backup.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcd9a15702a9b326e06210c5c14b402504efae5c23f86242921fe745d321d2a4", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_image_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "da6f412ff90955c2ba5ade197e163dc3c36458c036d36d30b0bee1c96e974e43", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_ip.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "96e4fa6eb157e3553fedf94669681c529add87cabe8aeab442c9f7173f4f398f", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_ip_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5abf4b2c16ef564f485d7be74882b003c3934ded53fe9115022808d88bd90db1", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_lb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a99b738a4a7baffa0ab13ab5ed273bb0d4a0c4ee84a9121dbc7def22fdade7b9", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_organization_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6e29446afc7823fbf66142c7296c24418538474da1eb6180a4fe3ae6e97f3477", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_private_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f95de713da409754100cd96cee21a007082f2bcc93bcbe5e0cc3b85b0324918", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_security_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "41072a0cee6fb8794ea5214ef95f065b9b1cda8ee36296966c529e867655e27f", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_security_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7fa7d5011ba75d648159310c0c1fc26b573d56c973166591c23044d1e72c5492", - "format": 1 - }, - { - "name": 
"plugins/modules/cloud/scaleway/scaleway_security_group_rule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "03916e5b092a441d6945ccae89c93968f21083bd551ddb48e9f0b280b9abec7e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_server_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4c5955d275d799b7879ce024888d3d62288c3e19f377b42bd8e22c4d366915b7", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_snapshot_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6127b39c7c7f02fdd516efe60384f4d0b47165e4fb6ba81b5f96a7e42f559983", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_sshkey.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8b5023d5077dfba435674012a27ea807d9352709feacc2eed6b1e5f86f8e582", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_user_data.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb81200dec6e35c01ca780a99c933f255f35c5ce84a3f3f9a1fb24547f295cb7", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_volume.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49173705942bca167ab4caf077dd0ada20360272352443e1a341e624e2d5e77d", - "format": 1 - }, - { - "name": "plugins/modules/cloud/scaleway/scaleway_volume_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c85b5e98e0b397f944ec775736381d6244c8a983117449366f58df4dd11c49a5", - "format": 1 - }, - { - "name": "plugins/modules/cloud/smartos", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/smartos/imgadm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e7bfa8f3eb4edeb4f1f9e51a4a2c5f17a4390513ff3f2375dc78ab27e5352208", - "format": 1 - }, - { - "name": "plugins/modules/cloud/smartos/nictagadm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "32df37987dc72376f00e17b852b236cb78a6827eddad3459fa8f022eb331494b", - "format": 1 - }, - { - "name": "plugins/modules/cloud/smartos/smartos_image_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e9c4e315cdd50c84ede09c145a86eacb98515d36fc87251ce11759d26de30200", - "format": 1 - }, - { - "name": "plugins/modules/cloud/smartos/vmadm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0971c82f4b903c4b66b249f6c334ca5e8807d8a6e331259df1e6b3a857b3cf79", - "format": 1 - }, - { - "name": "plugins/modules/cloud/softlayer", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/softlayer/sl_vm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b4ec3201ad6d82903722224f35f39c899ee94d96596ada4b112c658d55d8b76", - "format": 1 - }, - { - "name": "plugins/modules/cloud/spotinst", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f52f9bdf880e006dbdbbe2882289d506e89c50c4d1bad2ffc45706c7fc41eda2", - "format": 1 - }, - { - "name": "plugins/modules/cloud/univention", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/univention/udm_dns_record.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"828fafca6838e91191a827d40961539a3820507d08f82b0cb6dcdaae53d9b9ba", - "format": 1 - }, - { - "name": "plugins/modules/cloud/univention/udm_dns_zone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c18407922bc2d77ecfa7e650c0bd7a90eb896fe07e6c9eac191d0e68f63df2e1", - "format": 1 - }, - { - "name": "plugins/modules/cloud/univention/udm_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9ee3d7282c96a82bf24b58260e3fef9a376e9c2d768d0dd7c4e1ec648288fefd", - "format": 1 - }, - { - "name": "plugins/modules/cloud/univention/udm_share.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b9ed6ebf6e752a6cb101f24248ec3e319f1965287de8a61a638530701a6137e9", - "format": 1 - }, - { - "name": "plugins/modules/cloud/univention/udm_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "54edc8d89620ddde5caed48faea3e0341ab0d4dff6605c512d67767468fa49ff", - "format": 1 - }, - { - "name": "plugins/modules/cloud/webfaction", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/webfaction/webfaction_app.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f7255baa5e5a1f4ce32ccf47ba28e520f8216cc456e76ca4e2f58011db66f55e", - "format": 1 - }, - { - "name": "plugins/modules/cloud/webfaction/webfaction_db.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "33d9e30418f53beef9333cac841481ec7a12104b9b9dd83827509662e983b36a", - "format": 1 - }, - { - "name": "plugins/modules/cloud/webfaction/webfaction_domain.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7b7e7f59ae55508f5fa363376723a14dc3ab3d7823c962eb37f54c7d01381646", - "format": 1 - }, - { - "name": "plugins/modules/cloud/webfaction/webfaction_mailbox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f7bd28802ef9156cb65184bb6b2c890fe68011c50039032b364e411a7fe778cc", - "format": 1 - }, - { - "name": "plugins/modules/cloud/webfaction/webfaction_site.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "251596c3e652ec903f30714b4ed852fbb57ddfeb7a37e11e2189dc2d52a98655", - "format": 1 - }, - { - "name": "plugins/modules/cloud/xenserver", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/cloud/xenserver/xenserver_guest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "75fdda2a572d73b2248badab234d9a4c91caf035be8bbf450652fc567aef5c6b", - "format": 1 - }, - { - "name": "plugins/modules/cloud/xenserver/xenserver_guest_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "55a88716e40077de6218ae32f47d73349dd6993469e02a0c0d867b6638991280", - "format": 1 - }, - { - "name": "plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "512b73487169482e8887c48e6f3278950736c93a5c2a4c698b149e80217bf270", - "format": 1 - }, - { - "name": "plugins/modules/clustering", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/clustering/consul", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/clustering/consul/consul.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4118f4c040b4c3255e9b585aef388871098bb6da386ef3dfb6eff2a62621b7d7", - "format": 1 - }, - { - "name": "plugins/modules/clustering/consul/consul_acl.py", - "ftype": 
"file", - "chksum_type": "sha256", - "chksum_sha256": "b6f145e052de83a3d5fcdb12fcc783b7c14b42be19bee84b021e28bdd5e4d2b6", - "format": 1 - }, - { - "name": "plugins/modules/clustering/consul/consul_kv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "470aac4466c9a747514dcc73b3c50cbab8649050de192563f35d0054820d60ae", - "format": 1 - }, - { - "name": "plugins/modules/clustering/consul/consul_session.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc7f0c964b98a2bd770173babef63981ba77fdba3581f31d844caa7aaf2fe723", - "format": 1 - }, - { - "name": "plugins/modules/clustering/nomad", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/clustering/nomad/nomad_job.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f340d59640dbfc609d17914eaae66d0abb75aed40548448b92e88b3070c04064", - "format": 1 - }, - { - "name": "plugins/modules/clustering/nomad/nomad_job_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9fe39694e1781829ce8bd562b30f040127f5e1e2d7a977c82db3202fe0b00352", - "format": 1 - }, - { - "name": "plugins/modules/clustering/etcd3.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eca366113dd69573ccb5c95250ceedfbbec34523cc23ddb2406e3ee9bab01e75", - "format": 1 - }, - { - "name": "plugins/modules/clustering/pacemaker_cluster.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4386ec559c0dd166cb6b6bf4b2f43f3368c2da231653b3f4027d64fb921b1e48", - "format": 1 - }, - { - "name": "plugins/modules/clustering/znode.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "09e0d13c350448cb46706cc1106c643d1ede2a94fd54eb4c9bf6bb5a6b36839f", - "format": 1 - }, - { - "name": "plugins/modules/database", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/database/aerospike", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/database/aerospike/aerospike_migrations.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52c1641f7f943c727a0d6b8eab2b292b010d9347f28396adc4e8c75159dbb08f", - "format": 1 - }, - { - "name": "plugins/modules/database/influxdb", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/database/influxdb/influxdb_database.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f7f03aa049ab52e4dbfb809c86a65d026f518047de475693616d52a611090cc", - "format": 1 - }, - { - "name": "plugins/modules/database/influxdb/influxdb_query.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3a8b781c48ea54c78d2a8ac358ccb5f901746e79b0d0da842b5d06068ce6b1c8", - "format": 1 - }, - { - "name": "plugins/modules/database/influxdb/influxdb_retention_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00fba18126835c5c2e9e79ad1a3e0fea04613c9718839ce304bd5fe48a0450de", - "format": 1 - }, - { - "name": "plugins/modules/database/influxdb/influxdb_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6be29679e39cd622bb5eeaec56a6d802992a2e76a66a1058d478fa72ecef3db2", - "format": 1 - }, - { - "name": "plugins/modules/database/influxdb/influxdb_write.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f5e2d773ee043f148680048a538b3a61d529ea7628b431149ca7f8c51057dbf6", - "format": 1 - }, - { - "name": 
"plugins/modules/database/misc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/database/misc/elasticsearch_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "09a6283b244e18cdd17f34bcbf8dcfea1c85c7aeba635e033e4b1d7475f4d484", - "format": 1 - }, - { - "name": "plugins/modules/database/misc/kibana_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f9ecdf864136ffaeb96c2239570ef3de82852d38cc6d522cb801590c62d4a07a", - "format": 1 - }, - { - "name": "plugins/modules/database/misc/odbc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1a07ed4cd1edfc030bd2bc888c365b50d44955cb82d55a69564f524c42a6591d", - "format": 1 - }, - { - "name": "plugins/modules/database/misc/redis.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b2d25f0de427359721101190758005d983d3d8f040fcd4a4eeb1453b90e4982b", - "format": 1 - }, - { - "name": "plugins/modules/database/misc/redis_data.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff6b0f5c3b03c74c23565cea5bc0598d1107145ca22ce05c18f077d2c14546b2", - "format": 1 - }, - { - "name": "plugins/modules/database/misc/redis_data_incr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f098910c5bfeb5edebd3a6ce2a9156d51c534dce997697f219d2a8eea297a27d", - "format": 1 - }, - { - "name": "plugins/modules/database/misc/redis_data_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "671fc3bfc6d1b36aa4f2ae686d2d5fc180a1decbd61efe3f03bcada8b29da0a8", - "format": 1 - }, - { - "name": "plugins/modules/database/misc/redis_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "50a909e9a9cc3d2b74c3146d45a444f24234dca68e399a18474d8fbdae19d5dd", - "format": 1 - }, - { - "name": "plugins/modules/database/misc/riak.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4c8bf321e77871edc3c0a5c342707a50e9c2571fca0ab5bfd8197c682a28b80", - "format": 1 - }, - { - "name": "plugins/modules/database/mssql", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/database/mssql/mssql_db.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10836be6d1f0c2d46a5ad956f66a98f0ee983de1660c462d3220d377a14ce6c2", - "format": 1 - }, - { - "name": "plugins/modules/database/mssql/mssql_script.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fce6238160aaf08763818017d8bd5a211bf2dd8c478daecaa0584166011d58b6", - "format": 1 - }, - { - "name": "plugins/modules/database/saphana", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/database/saphana/hana_query.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f0503130e11a7444e652e67b08fce9b7ae64fe7e14b201857822558538274387", - "format": 1 - }, - { - "name": "plugins/modules/database/vertica", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/database/vertica/vertica_configuration.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff26d24f57fbea1fcf16e64ce0eff1417624bcf5224da566422a6086512a8c19", - "format": 1 - }, - { - "name": "plugins/modules/database/vertica/vertica_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f270fb5c6863524079c13320d7816bb446f48b485e5fda83fba3d76183a70a9", - "format": 1 - }, - { - "name": 
"plugins/modules/database/vertica/vertica_role.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4127075612f26e1b253766e24f5976861a9f3a985cdfc0150c46bccf394f7ba0", - "format": 1 - }, - { - "name": "plugins/modules/database/vertica/vertica_schema.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69e66027dd2f802db9e894e4e45ba61e8f7324d0439807f06f1e0766508e371c", - "format": 1 - }, - { - "name": "plugins/modules/database/vertica/vertica_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d9c75983531caeba4f82346be16d82e759af99ea6ab5e55253b68cce5919e394", - "format": 1 - }, - { - "name": "plugins/modules/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/files/archive.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3a0715d0aae4143b1f42dc73f560afbfa85782c37ef1645840e27400da7534d3", - "format": 1 - }, - { - "name": "plugins/modules/files/filesize.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "181ff76460418648e0b4dd3906d3d7699eb7ebe08eb2b532aa57a295ac06237d", - "format": 1 - }, - { - "name": "plugins/modules/files/ini_file.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ca49a57202bf72b8b079bbbcf5cfd3e33e530e549bd1ca1626f328a11b8b2839", - "format": 1 - }, - { - "name": "plugins/modules/files/iso_create.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e22d344094cca4e10a77f281172b99e2ff51c71d16f63db2088d4cb5cca1dcc0", - "format": 1 - }, - { - "name": "plugins/modules/files/iso_extract.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "45e148bea9a28b93070734fe860f594c56b645deecd5799fcea67e8ac6c8d0e2", - "format": 1 - }, - { - "name": "plugins/modules/files/read_csv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d62a1f5b65ca81a1ba775829d7adc5e175a776de15e544cf85ea321ded35c145", - "format": 1 - }, - { - "name": "plugins/modules/files/sapcar_extract.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fd7fec63a0695a033b2b637499b1f6ace8dd36bd9656f912632260dbc04ae88d", - "format": 1 - }, - { - "name": "plugins/modules/files/xattr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7e921b2dae03b00730009593599edb959ad3ff62419caeb3cbeaecdd9be9f2c2", - "format": 1 - }, - { - "name": "plugins/modules/files/xml.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "20c53e6a1125b9d310540e60133de640668297ff31b91842bdd659ab0155f688", - "format": 1 - }, - { - "name": "plugins/modules/identity", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8338f390c1e9ac774c095ada6731502c1280e30b01bef293a6651ad54d0bfe8b", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_dnsrecord.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "88fd68dcfd0725e575ce7fac94cb8eb9c74024e83bb0eb5dddec34d568725ebd", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_dnszone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9944ce41cae935b07410a1a482d2d4cd1c6f07f7060a360e6888e67992075a36", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_group.py", - 
"ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "70c065752e9e80713862f8fb3fb85f60219ac80d97a49139288bf6dd335ad168", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_hbacrule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8350663990ec7b9b46879f317760e64e9eb9ad080170f8a3ab66f26022623cd5", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1672d2a59433c0c823dde1d227c7d78caaf492f981d55c6333ba950ba298907c", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_hostgroup.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae6569872367a3b15727facea24ff4322cdf35512b1dcd8c4889997943eeb1d8", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_otpconfig.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcd17661ce19b040683bbecd506bdb2ec5ed2909c20d71c0a814bb4f05fee345", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_otptoken.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ffaa1a58c973d8794d9a1797bd75bccbae783699e1ea87d4bbb7b3ed434d72d4", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_pwpolicy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "91f450bc4c6329e67cdf920e7f8499ffb7d27975b0a548ae2110354ed5e2e281", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_role.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "24e469a9d45178e0fbdfb4635f525640cd1033ec559f45978e4ba7cc42fb95c6", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_service.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3177e872cdf023c8a7e8bd65bd09e2ac102b2c3565c40ee5dc9d8c0fd8ddfcd6", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_subca.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "932c8bd910f72a6fd20831704f96358bfd3b96e94ff8346a09a5c401a27087b8", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_sudocmd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "58d95fc267fc9d319ff05df6aaab1fb39df187d48bed52d497d92a30c54750ff", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_sudocmdgroup.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8fbc39a66b0356ec18f8468789e6d4ffb5a1fae4f0e6d68e8837821d2c138f9", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_sudorule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "15ee194ba2afa0982721aed91fdc69f93aee33b45af426efea615e3a03016f51", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97c135b60e1aca5fc78d7af59cbf5f5dbe14b0ccd93951bc10450698596c1aee", - "format": 1 - }, - { - "name": "plugins/modules/identity/ipa/ipa_vault.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2ee238e7dab861eec17312d74cd513b493ec69b41e0d225501c8668d61837d2", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_authentication.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c90b1d14c16a6a61e114fcf81cecc8a37c0205d45328b3a2d37e4c26f89bbd1", - "format": 1 - }, - { - "name": 
"plugins/modules/identity/keycloak/keycloak_client.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6afcc0997e09859e999b6988fc8313c2b6ab6881593c32202caffb9a00d4e8d9", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_client_rolemapping.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "591f181bff4630f8102b105189ff5b3a13de126520d1d28def344d175527979b", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_clientscope.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5afc1453d8f5360849ee0c3290c0c838f0aada90e1812928e77a1b1e7a5ffd18", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_clienttemplate.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c950ef71abd6035f3861bc568f993b414bf1a24e163c7f486ae529ac5a92cb24", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49d81d24c71674584f1a762d4db1f73d7a13ba78fc367f3961e6e2cafe0c5329", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_identity_provider.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2d458b33b61e2972f529be3fc2b9818bc0bb9511fd2ad1833b8d0ee11032261e", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_realm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6ddd98908cb2d26b7a3627e563b5e8b26335e23d6f8cb7d4675399dc891dd19a", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_realm_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dd2ffd0fbe413e17ef575a432a2ce8d251d3d634f5dcaaa0b70dfd20d2ba22b1", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_role.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad5b8b8c78cf44c6309e19858709eea202cb2a8f20f27e85fc3ea9260bd1b80a", - "format": 1 - }, - { - "name": "plugins/modules/identity/keycloak/keycloak_user_federation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "100992e28832d1fea678013004dbc8400871bba27af2426c2f240b0eaf4da03e", - "format": 1 - }, - { - "name": "plugins/modules/identity/opendj", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/identity/opendj/opendj_backendprop.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e45d6e5a6145f58dec874da17714d239170c25aa3d6b6bed4e7ab5d45aa92e9f", - "format": 1 - }, - { - "name": "plugins/modules/identity/onepassword_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d0e2a34b5efebec54d9dce104527972c13fce6c7e04ef25220a8073f4d385d35", - "format": 1 - }, - { - "name": "plugins/modules/monitoring", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/monitoring/datadog", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/monitoring/datadog/datadog_downtime.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c4671fae964f84c50e802b97fc64b2fa39173f787741887a6772d6a300184b69", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/datadog/datadog_event.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "203ee66689572ae405f692c6a34b24d12da75ef835feaf512ee25f179e204077", - "format": 1 - }, - { - "name": 
"plugins/modules/monitoring/datadog/datadog_monitor.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6c1c03834a375f842171002ac31ef4204c4830eb41283263b954704e23353d66", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/sensu", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/monitoring/sensu/sensu_check.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "15aa5b61a60a0c812caf893e14c76f55150fa535edbba58a698fa0b07a95687b", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/sensu/sensu_client.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "658c2f425bd755eca7ff3317d9bc4ae20ab2d4650b8659b9846455a4cf650e84", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/sensu/sensu_handler.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d95a2dcc85c1c9ccb51ef8cd0f6412a841db023dfd3412b47bd8aad17e5608fe", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/sensu/sensu_silence.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae9e6d8b69a746cf8e985499ed73c177abb02fdd13bbd04a501a8f76fff96fbc", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/sensu/sensu_subscription.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "530a5fd15a37728a1fd346f68300ecc4fcf28904c1cf3663875006514f0db31b", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/airbrake_deployment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6aa925fba8833cbaa4a23775684646db31a7f1410c4688392ced89db20bbcade", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/bigpanda.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcc88a1f79d5f53d3fe5e69d911a01177f063a9aa52428c22b4564d306f35ec4", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/circonus_annotation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57172616325c7ece221ed3f154e59473f1bfe52c802dcaf0fe0f870133f185b8", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/honeybadger_deployment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "291189d8cb646f5837e39daceeebfd8e54b4f806430deea58c4d54eef50ab709", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/icinga2_feature.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "770edfacd0187f36c9bc94fc88df9fbe51dc29ae1dab5065dbcbd0b0043a089d", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/icinga2_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46b696ade815c4a19e928de8ca0ecdcfe20754bf55cd1f5ace8554daaded778c", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/librato_annotation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d9f41d406bfe62d78ad1a042c78019c6fd4df50632213dd5a2d619a2e2bcc1ba", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/logentries.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "39eda48181ea6b93f08876a2f9db6b3c22693d848dbb07d6f6592a8adda50152", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/logstash_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d84f5ccd70f2dfdfb0f306ed675920972d332cb07b9d1f7997ee9eb16b6dd0d", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/monit.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"f0e631c78c8748e568fbc1624ac2831861087b07f88cac56cd995602aeb3fb89", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/nagios.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8f3d329e518de7d3efb7cc6b8d96dd17f420a22134f61012b605e579dd365a7e", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/newrelic_deployment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5bab501cf9754d7a6c46ae2977fec718592d45efae4d4cd5a29652e6f76bf33d", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/pagerduty.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cafe39cf6372187f9c3ab1aa1caedbb31e329474f46662be6dab7247c8db3e10", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/pagerduty_alert.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c816f9a62a0c0ba8c520986f4918945877a7e214de0693da2b444e3550a79419", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/pagerduty_change.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7f8b9d10f9edd7c2a7c896a660f920faa975d680ed799eb738ec7277205e748a", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/pagerduty_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "afe68c52a1fee0a441f79308f3e3f8fb296d9e5193bf74cb10b7a611e2a90c5e", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/pingdom.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "19b5785687a9151584a01ce49b9321d1cb4f4fb9a105e8c53a6e10654b1a38ab", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/rollbar_deployment.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d281b9e5f08730d58c9aac003d90b45151f9819eb871dd900e63ab3d882f5998", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/spectrum_device.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "883564d265cd219779f52beb177c1eee686445277aec016a0000a9734bb3f426", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/spectrum_model_attrs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a67e3c3ee88a04add9cd67e38778c14b56e9dec145c843f4cbafa550fd9851a9", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/stackdriver.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2be5529a5b6f3c9366af6e422fafeea193922831655edd3bf7f7d98c440fb506", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/statsd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "792e64a34b4d66ef704505a6464ab5d809822c2cf4277662559b3257b023f903", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/statusio_maintenance.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f86b31e7026fa92e312f3196ff270441d9fe75a5e67886bcc1b8c9e3e8d12459", - "format": 1 - }, - { - "name": "plugins/modules/monitoring/uptimerobot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b5aa626e5c790d9b21ef75af42ca78551c07e38e3539ce6dcafcd638cfa8d9ff", - "format": 1 - }, - { - "name": "plugins/modules/net_tools", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/net_tools/infinity", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/net_tools/infinity/infinity.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "639c7ed7633b97041cd61f657ec7d60d28db516cab49fac6c0cfec5a01c013de", - "format": 1 - }, 
- { - "name": "plugins/modules/net_tools/ldap", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/net_tools/ldap/ldap_attrs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "26070ca9bf3bfd37884672ad9335c2a7706298645e84bac4c259bdaab4269f73", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/ldap/ldap_entry.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7c1beee28d7661cce71496558a7a72f3afc3450e92bd5da44c5561192bf34853", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/ldap/ldap_passwd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6ba81db2b15e61479f3621ea0f9c1ee360a6938388349c842ee7cc39d4affaac", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/ldap/ldap_search.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27ace47cfda1f029f3fd0f87e80d19d4170df442a2da819adaf29c169e86c933", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/pritunl", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/net_tools/pritunl/pritunl_org.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "200240d97abc57f33f1a19342dac1cc7586a35fedb314cc23770567f5af6a5be", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/pritunl/pritunl_org_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6b8662b1c14487caf1366ef5e99c84e1b5baeb07f1c7d28d23207a1f3d3c46a7", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/pritunl/pritunl_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fa3c63e2d3575ce83371962f14da45413042adcb058eece23edb26b80e4337f5", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/pritunl/pritunl_user_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "58e441115653a6326381d3d25bfd37d2a73c52624a67c8432a886baf4ed873dc", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/cloudflare_dns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "92ca2752e2212e77e6cc3a089a6a72f2a20983ebed40c8edf0e1ceaf18ace10a", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/dnsimple.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0dbb97d863fd4a2fff967c39ea1ea12c18f525db25090b6de23239a7ee1e859e", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/dnsimple_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd640688d78766e01ab5ff644b82807ee3af3114a8195a482a7f8a6773a32d64", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/dnsmadeeasy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a4e6ee3395aa9b100b5f9e0e66bb721bcf9688822833ca3f821d977027961c66", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/gandi_livedns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "93cbd36bb0cb57ab866445984eec096389e81449ede51e141b22284eada70326", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/haproxy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e406159197e286963c9b16223af8602f7347cb22dc6f02345512b8ab2e1ddc38", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/ip_netns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7703c45b7a46aea0d992130cafc0922dc74d926266b8f908adc15c6eef1cfa29", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/ipify_facts.py", - "ftype": "file", 
- "chksum_type": "sha256", - "chksum_sha256": "a3cfe7e782b99e108e034ad45b38f3a686bd057c13a405e13b4082c9d4655ba8", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/ipinfoio_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ffefdf9402a767ea1aa17675b8be1d868d68e71ef5292b26ea0266a856914208", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/ipwcli_dns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27f69f073ce4bd49b82bee81a74f81650a89517936b723a1641f203c281ac406", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/lldp.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0bebe90d2f24144019108f71e7dedb4ed60ec93abe5e96fce73196192de34afa", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/netcup_dns.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "17d6af51c3f484d8415565c30657315387fe7b669e3f7646aa1f5b9ffa444619", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/nmcli.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e585180782651197b35c000a62b28c94f599beea53c963b4b44a4a4733b9e833", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/nsupdate.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3ff24f4b701c08dd89733f207803d8e05e37b0ea0d40ea00f3c2b406c94eddb7", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/omapi_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "32824ddf8d839bdad9decf1161bcee7301af665604be924c98b3378e13315e12", - "format": 1 - }, - { - "name": "plugins/modules/net_tools/snmp_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "76246be2be66921ccb940983d25eef4bf5b8cb2f2b96b8bb3f9971bda482ee68", - "format": 1 - }, - { - "name": "plugins/modules/notification", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/notification/cisco_spark.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b91160d8b53c538dbdeeb45a5584658fcd1a4c57f43ba8a3237a91860a99b02", - "format": 1 - }, - { - "name": "plugins/modules/notification/bearychat.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8f224a3485783e66fbde1636e5131e561fd1a9006ffe2ec5d24188c07736f5c8", - "format": 1 - }, - { - "name": "plugins/modules/notification/campfire.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d22a3da654653ddb964eb55db9164c254860f4430dbe8b505b6945f220294bea", - "format": 1 - }, - { - "name": "plugins/modules/notification/catapult.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f1bc195bce4b7de9e4e5c612fba7c422e104af61e77d79860c7dfa69b8b0f15e", - "format": 1 - }, - { - "name": "plugins/modules/notification/cisco_webex.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b91160d8b53c538dbdeeb45a5584658fcd1a4c57f43ba8a3237a91860a99b02", - "format": 1 - }, - { - "name": "plugins/modules/notification/discord.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4526e01b8b1989fa6bd10ad53702eb0115d7e9d213caa2ddca59d86b521af84d", - "format": 1 - }, - { - "name": "plugins/modules/notification/flowdock.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c50deeb4589cfd2ae9055e2ca708acceaf41f8c4e705a2f3c84bc4d5093bda9e", - "format": 1 - }, - { - "name": "plugins/modules/notification/grove.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"b743647c9e91e766f9d75ca332fce7f1ee2d53f1a60c25e30aa1da8c54fc42fd", - "format": 1 - }, - { - "name": "plugins/modules/notification/hipchat.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46ca51483cbd2b779fba4a7a938d4b2e4088eab98423a196588dbf5c83287e90", - "format": 1 - }, - { - "name": "plugins/modules/notification/irc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5056a0944304be0cb4585231a68496ecfc2df86c3013ba1b398a17d73ece48c9", - "format": 1 - }, - { - "name": "plugins/modules/notification/jabber.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "38e25af68e39cb333fe7d46308e6798e9884c5df4feb3d99a9b5c55e8a264709", - "format": 1 - }, - { - "name": "plugins/modules/notification/logentries_msg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "34982c5c0e9aef4d724a068cc3bbb34df2d7e9757d7d2ed620990124d64b9a84", - "format": 1 - }, - { - "name": "plugins/modules/notification/mail.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d321469472ef8dbd1a0c0c06b67c4213df7a11d487ae18b8962ab1ce7302d36e", - "format": 1 - }, - { - "name": "plugins/modules/notification/matrix.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49291a2a57c72bea087e2afffade0f7f083deb196f8e32dd6d79955bb5b6116a", - "format": 1 - }, - { - "name": "plugins/modules/notification/mattermost.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4ca0cd2ff4e27e91ffa8542531dd77413443690721b78e468d723e3c85278db", - "format": 1 - }, - { - "name": "plugins/modules/notification/mqtt.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc3caa21d09f3103a4c21cb7719ed69522760f9221b536e79ad9f9cc52470d8a", - "format": 1 - }, - { - "name": "plugins/modules/notification/nexmo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "944a2d977cdaf55b8c53861b2ac13ba4808e3e49429be8dea75b38ec028d2b18", - "format": 1 - }, - { - "name": "plugins/modules/notification/office_365_connector_card.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ca2802d019e153833f903a044a08c233555cc5e7476446c6df780b23995bd26a", - "format": 1 - }, - { - "name": "plugins/modules/notification/pushbullet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0763b7e2415a71cd93764b56b5a4f8e07431b19f657cdfe5f59b1e8c63b8ddc4", - "format": 1 - }, - { - "name": "plugins/modules/notification/pushover.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8d4b6f7686646e0d44a7ad63811b8c1f69927317c2ce8cea4ff855027355c219", - "format": 1 - }, - { - "name": "plugins/modules/notification/rocketchat.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "952dbea3dbfd46a029b9ad19b7a5f3d7659df608a9346f067563fd98f9e8ce65", - "format": 1 - }, - { - "name": "plugins/modules/notification/say.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9427eced754de74bbb015098444c4cee334620980bcf62c4c6f7e687475515e6", - "format": 1 - }, - { - "name": "plugins/modules/notification/sendgrid.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "74a1a142ea29a5519ab4fe938192638ae79b54f40a957dbb7d2b4e3ac4474b87", - "format": 1 - }, - { - "name": "plugins/modules/notification/slack.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e96ef97177e41d87862c20fe5daa14f60230671ba34309b83477fec933c4238c", - "format": 1 - }, - { - "name": "plugins/modules/notification/syslogger.py", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "abcf172195a8f6b74396dd273e2d9926c0c6bbba773f5949f9565b2cd2aaea07", - "format": 1 - }, - { - "name": "plugins/modules/notification/telegram.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "166e6d4a5b832d22b024dc9395780a807341ebbb6d5a78726dd40d9f5214fbbb", - "format": 1 - }, - { - "name": "plugins/modules/notification/twilio.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bc5913224c8de906d7739278662d6efa7055a88ecc24dd2e568a2c33065b0e23", - "format": 1 - }, - { - "name": "plugins/modules/notification/typetalk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8b0c5a2c18ec40da946914f93655f144d608fcc4737cca258642c44d69245b42", - "format": 1 - }, - { - "name": "plugins/modules/packaging", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/packaging/language", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/ansible_galaxy_install.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f7662c68a2cd0beb854eb1cb47411a4b5bf7004acfa0cd101898aba88c0afd6a", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/bower.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1469648267092280b084c97ff84b89cd29656ae25f5c12b23d6a34d6bd21f214", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/bundler.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b8afe9744c027374c7bb7fce88ed55069f27cbf040447a5f0f04a04b9053012b", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/cargo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bba289036c8d3d677f768224f9eed512badd2d001089ab783be6f5a8f5e868a5", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/composer.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7f2740d5b0c235ca97fd503e4441274bc748d4c5b0dcbe3e227831599f573734", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/cpanm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "869b73609aa1f1ba8f2d33ccfed04eec450bcdcf31b710526f2d043aa97c0ea4", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/easy_install.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a11e3e151595b9b729431aa2a4be23edd5d228870b3876cf95160d4552e2ee14", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/gem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2658234014600b059931be2658b92731a7b317a49ad8b87b7a90f4021d2b92af", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/maven_artifact.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9093a95b922bf4c93af8d371f23f6ec650bc04cb139cbbb3ade69d50b050d5d6", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/npm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2ad403903ddfdb432279a0c91640d2bccc6f9ff4fc017f865f144d0cf12c3fa7", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/pear.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f80210e950fbe7d6db548f027713aec26864be6c579179f44128815410597bf", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/pip_package_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"1b88e00fa70e9bd96bf141c4d44a7a282b02009c43faff54a4d9d54c69d137ac", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/pipx.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57df11bbbf4ae34e6eb934afc6808286721268d74540379d1ab812fadbac296d", - "format": 1 - }, - { - "name": "plugins/modules/packaging/language/yarn.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d932d7644fb9f5e4a333c1e402b68b485a16e3d14883df4b8f9a1f39442d077d", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/apk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "066665788179692795453db9675607e9c400f214f80382fa1646c0a5c4e0b709", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/apt_repo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a92bdffb40fa2bc8fc8e6954573fccec4a94a8a23884dcee4f680ddec78880e2", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/apt_rpm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e8b8b0d6893fe18ae148144e7ce1e816a07cd760ef60511dcb230c0559b4e433", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/copr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ee22d4a8ae70df45b23c47432192ba596568b8ff2ddb225c7c7908b08f316c5d", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/dnf_versionlock.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bb392c313d8a04369b834a4320c70110311fc1feaef6d58852659dacc682d6d2", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/flatpak.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77856cfeb650ab5930a8af1eacf9b87d3c654c0041c713daf6b3f6fe85c4a9ea", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/flatpak_remote.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0694a7aeb1878ffe91f91625b645d9fb6391dae6e57bff17dd106c83c6e9505a", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/homebrew.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "066bf7706d89a85f64b0cf890adc84f4ec37b23291b883c12c73e5b2b80a5c03", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/homebrew_cask.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2512568adbfbca7a18574b57f68cdf599ea10b5deabab628182ad98c4a71836f", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/homebrew_tap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f1d8e1a616a2527b3677f208677e9a1261330777aba1acffa03f093d84f2dc84", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/installp.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1360ed768c621c482767cb1994d96e93827b55a20da4d3f2cbcfbdb5278f9c18", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/layman.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "836e062d867c45bb523e37edfc3cf6b6b9b94700d994f1755d78b706cf3f6bd0", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/macports.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dbd71696e4f6e58f8d67117c301c32ee210e6765f6b4f7a2a966b64cba91cd16", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/mas.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"7346067aa024a97e1fa6c3b2bc55a6eb7469b2eea9c8b69daf179232210248dc", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/openbsd_pkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9f9274e283af531ea1604d2231d456b443ca118638c24387c285e51af75bb475", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/opkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e456e9b6d5a6760dd77954c9c35a50524344c6f381b69a5b1e278a2b51fff048", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/pacman.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0632694bbe9620826447c3841d4581e718395b052c324c821ef261662980d898", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/pacman_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ed012d9d887cdf7f21196040f817b2831ee72056f9ce9a9cf52b622547a760c1", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/pkg5.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e36ce1069607e0608509fc036fb6454af0ede52c3682cb43dea44eedab746729", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/pkg5_publisher.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1197f2086a98fe014717bdf3396a4ab17ce600b9867897b9c9a5464b34f626b6", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/pkgin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcb2759ad7a124939de46ccd21103b3a97d5a9dc027530532a9570cd039eb0d8", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/pkgng.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e7db8e217bcf87e0eb62e61a650f03a800e323132b8d9c25beaa244f77299510", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/pkgutil.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "be59c5c6e33732eee6662cca01a92d47c6391221783a8e13d3f3f6fe81c2116a", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/portage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef869657263254c0fe13e4b160bbf16ce1f935b79d1c65c522e528f1faff98c2", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/portinstall.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7f8c255fa388d228c0c2b3e18296ab1f8d9e0ea669241099f8004ec8989b23b2", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/pulp_repo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27a10386274c0e0ce4b1898686fadea5811dfd7ad45b5daed757d360a70ba2e0", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/redhat_subscription.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69c5a89501f2ec7d9cc4dc7ec38941bbbdaa5548d60121bd8734891f5c210d29", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/rhn_channel.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6753c4f100c54548d9a34cc55191a1dff35e789e3ad60a476eabcb85d6e3a71f", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/rhn_register.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3fff78a3b4e686e3e760bbf42691db83540ef06b7d88f28b57223a09f581485d", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/rhsm_release.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a20574f661bf3bcd1bdd02688ed4112eb7a2b35689427e70f5e455ddad7ec1d4", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/rhsm_repository.py", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "c868fab9daf9cd10efb1b01f613cdb85848f37596464a67fe777b68a681b47b4", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/rpm_ostree_pkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e0538d35acc1c91abd3bdfa76310252f9782693e7328722ca04228100cebfb76", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/slackpkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "65d52caf009ae8dc698a49d4fef5ac6644954a6c46a68fd961b0e690ddfdc141", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/snap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "513ff327c2a09f42eaa5a945f0b72fe2e6e17bbdc5491b6875c04eaa8f846b48", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/snap_alias.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b23129de9e88a07cf2c3d5012cb32ec105622e7dfcdfbcdaf694dcdf92cf518b", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/sorcery.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ed8fec8e6c5357a8e0a4d7cf020c253a574f8c239f3371b9604beb90cb0975db", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/svr4pkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e6fdff83fa4d867e28b52c26ab42377cb8b793218b68a4d538c06b923a78cfff", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/swdepot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7cf596e285fbcb98e9bae8ee345d63daa2528c34fd93138d6c9afb77db2f7d8e", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/swupd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8247ec718e884f51246f84426c2c50ed7a48aac0e7ef97161ce11e3aa62662fd", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/urpmi.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2533a194a0b36cceeb0ec69d8586cfe12e8f4c7bdf13e22dc68c7dc9d1c8ceec", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/xbps.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "089f8b636b6bf7eb741857050bb8f3e105c919e705d561501bb91f9a1301af87", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/yum_versionlock.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e9581edf16a8ece5930e0eefd40622ee4e4b453e564d3e40adcdf949ec1257dc", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/zypper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4386efd38cb4d2e6b5f6ffd4a4d66265541f6ba78547359833de537095036b1a", - "format": 1 - }, - { - "name": "plugins/modules/packaging/os/zypper_repository.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef4e8074966a7a65e9b22d703beee3f2b6b7aa5b22e28123bdc18d5043f8db88", - "format": 1 - }, - { - "name": "plugins/modules/remote_management", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/cobbler", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/cobbler/cobbler_sync.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0a69b0d481ff28ea1a5d848fa8b80f9a07a4ccf3a50b3fd384b588d0184a31d1", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/cobbler/cobbler_system.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"b4d8ac045e7b8cfadaea593081d4e6bd815492162d6a0a105041563e593827f2", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/hpilo", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/hpilo/hpilo_boot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6d0d47b799f9e444207ed5b4667356cee1de57f1d2aeff137aba990ef08beedd", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/hpilo/hpilo_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "293b316839408346f2c2c0123d90b40c8f609e82a12246c202bc3843fc811d80", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/hpilo/hponcfg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc4939e4db789e57dd8b72fa79789b5f5004b98b3a3e4e5ad2a1ab370d6ce274", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/imc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/imc/imc_rest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e51c5d1375a1a9f469cfc28140144116cb29c3bfa35c459708f6ac76895340d0", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/ipmi", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/ipmi/ipmi_boot.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "32bc6fd22d5a4705022af7af389209a8db051bd7994c24e233261bc8188234b3", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/ipmi/ipmi_power.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad505007f78f7588bc403a75c522ef4ff75de4b7acfdee4dfbce33aa29713e26", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/lenovoxcc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "457f980a1ceb9c24d26aa2b7145d26f8902c56a4cbc0ffc7ddaae24670f48741", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/lxca", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/lxca/lxca_cmms.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "74ad7330003cfce91c50347b358bea005a2616da70aff5a757bcdd714a3f86a7", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/lxca/lxca_nodes.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "82e905a3d21b63b40414f3ec63dcbd578743c38cf62865ddbe84a5dabb8ec622", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ddbb9e06f40e750fccf055a42d03a1a80b45bd238d8d4558916c849940b73903", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq/manageiq_alerts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3410230671e4ca67fb49d62280309a70c8e272ed44b063aa133b9e906b5d9f74", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq/manageiq_group.py", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "ab64599f102c1cbc693aa6a963bfdd0890cbe5c9a556bbb95b4a085bbb354421", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq/manageiq_policies.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "583c115fed4980ab0dd6b7beaf97b8779c5976ed5f212cea213b886f08ea2fbe", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq/manageiq_provider.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f229203632039bdf0e89ee52305065bf2038e8d934a94ae293012da52feda470", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq/manageiq_tags.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ace512b173524ed7af89882fe3912511f1138a58a8ef9f426c56226ce8e120fd", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq/manageiq_tenant.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "99d5ff3a9cc80ba2cb52ac6bcdde27a41e8993d355bae1eea34bf9659e0c7cb0", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/manageiq/manageiq_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c9c425603e1e88919c2d9245030f2f02c3866337aa4e81eb702dd003d45069c0", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_datacenter_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "355d4c6ef338dcf618383018bb1b7a4dff56e8c01f4241a6ddb28b58fa98f4a1", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_enclosure_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ba63e68b4e2ce3fbe7cb6e3884ce7f070f6dfdfc4f21ab8f6ccecf32bf4f55db", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_ethernet_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2d4ccac855870076ac2e5852e5aba82722d56d161317910c65f0144c9888bce", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b9b15514fd1fc3d8f91b83313acddc8dba8063fdc160c015ca0ac326841d3cd6", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_fc_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3669b6c65a3689dae16737839dccbbe509725ae75f52c55c2bcc935decef6ebd", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_fc_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8a59e9a708eb32e0bc67eca344d458f20171812bb765f54069e707817d32f3a3", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_fcoe_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6afddbe7fa11896de1506c9fe82f234b36ca9640483f8c9247e698981bed83ed", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a89dc5f2cdc9e48ab64afda2958b7dfe0de623bd09ece5d90309f96c5c82f02a", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8ede8042b1abfffb2b7063e081ab962eeddc3462ba9498c5f777ba7b17aeb79", - "format": 1 - 
}, - { - "name": "plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2bfeeb09917fa930055ad91ab23dfcc98cbb1c638c83fb2a484326527541c902", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_network_set.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a2d0b3c12e770373a5ae9dd4e30e20e9199dd5882cce2ea99b8e132e0d73db4d", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_network_set_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6ae6c0631e08a394570f300600d4fc4c667e11a0c8c01b52a00b9b73e6be1824", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_san_manager.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f1b301a7bef55541938d21ee1b2dd59d86c8b4fdc7a7ec29c2b66f30afd0e22", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/oneview/oneview_san_manager_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4d0cc20490ea3903961f2ee4ca7c39bae0c3f2935fd71574fa36a62700283a09", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish/idrac_redfish_command.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "879b3d5825eb59bc67aea7014006f58df64853f8bff388fbb2b7d0bcb67b71a7", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish/idrac_redfish_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "24cbee078205ddcf90266adaec93635a38384d7f3ea4db3a8e0adef7e69b05c9", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish/idrac_redfish_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "820bb9a147f15fe41bffc5567f699b0f000db2869f2ea268f8e630250d95bd42", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish/ilo_redfish_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8546cfb15f05947f7c6760cb5d67928253269aa18102155f600995d3598b739", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish/ilo_redfish_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d175b3b05e25ed30302b1ce7994099a19b07709201c864ff37f210aa7df96ac", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish/redfish_command.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "423c2bafbce9538603e607934a6c61cb94d96014b901894a750156f2c6f9134c", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish/redfish_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0b46c6fd282bac3a6a347c25af71a4c9eaab7a54fb019541606824c4ea167e99", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/redfish/redfish_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f9aeb09e5827e46c9b6b4420362d7c27d729672322a10637d66164d5341e980", - "format": 1 - }, - { - "name": "plugins/modules/remote_management/stacki", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/remote_management/stacki/stacki_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63b57ef41bb4ffad7bd5def9d9d592e3bf2aecc1b22dc66a303774f3b6b95ef7", - 
"format": 1 - }, - { - "name": "plugins/modules/remote_management/wakeonlan.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eaedb6adc85510f03ea6424a673ef862122db281b83f75d3f66668652443fec8", - "format": 1 - }, - { - "name": "plugins/modules/source_control", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/source_control/bitbucket", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/source_control/bitbucket/bitbucket_access_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "36c0e727d4cf7e57a1ccb7f712ca472f3ed20a8c0b5afa656c9461d39b948ce1", - "format": 1 - }, - { - "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c4b8d0fe0f4ada9e881cc1e76e9365bbac7d35f0650235b9033037482d1e5670", - "format": 1 - }, - { - "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dd5b27ae648269aab81d3ac46036fc6288781c2a77c02db480ea66ba1bc1445c", - "format": 1 - }, - { - "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3409614c64334e483f093a3f094fab692d09aaac0db65da0225337e4db2993a0", - "format": 1 - }, - { - "name": "plugins/modules/source_control/github", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/source_control/github/github_deploy_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3d942e6c9a4fc0c0b2ab2b6cfcbb2067b044956b0cc8e3a4eb8908fceeca4308", - "format": 1 - }, - { - "name": "plugins/modules/source_control/github/github_issue.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c71ba6cb604c76b2200e68acff20cf55e167b5fbc111aa68a6efd0b6b0573977", - "format": 1 - }, - { - "name": "plugins/modules/source_control/github/github_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fe0c5fe85830fe7c1bfdcf99cdbc14af5366e29b04eeed1cf551092734279801", - "format": 1 - }, - { - "name": "plugins/modules/source_control/github/github_release.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a0feb5df29b4556ddae70b101a78da6127312803680504c61739b57b4008037c", - "format": 1 - }, - { - "name": "plugins/modules/source_control/github/github_repo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46c5064a6ffa00ff6971115414370a5e49a5dbcef106f18c16a89428e6691fe0", - "format": 1 - }, - { - "name": "plugins/modules/source_control/github/github_webhook.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "781a9ccef57e174ddfba6f794b147aa941b53959652a3fbfb9c38b37d4dec4a1", - "format": 1 - }, - { - "name": "plugins/modules/source_control/github/github_webhook_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f2d091ba64877de90900c03df4412db8b71393e0d5a742202feda625c05398a", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_branch.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "922b6c30c67ddb2acf0d28aaa9ab16dce5b1f6ad270223ec6773ef680e35c746", - "format": 1 - }, - { - 
"name": "plugins/modules/source_control/gitlab/gitlab_deploy_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "43f0d1631cc651c15a935e280f31677805aae6efb6d80b95d21511b8fe4f79ea", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f566f0df7ea3a6d02b4fe0e8550d06400ac926d3d6a24975582c680d3a52528", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_group_members.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10e9d62d1291f8ca28d2dd9d40d67a10028713c53530f516490edfb2187d3644", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_group_variable.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1394fda09fbc289cf2716876d6a5463889abeb5d2ceea2915235dfbf29aa4684", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_hook.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bdce5a96cd31d9444b1841eb9ee396683c70ee3eb50634d2f02c38ce07b374f6", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_project.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba5e593304a1bb3dce94dab2cc62470a892eb3a039b1e6f99a95869d59c093b", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_project_members.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1a3075b6dd2783cf000979cdff99bf7b4f785802ed9e6e08002f629cc1a8efa9", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_project_variable.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "48faf16faee67ab8516ea6b0b7052cc272208325f8c8602c2f013b4384d2eef9", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_protected_branch.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "95ed01ee57390473707b05542cd73dfbc4ff729c5be435222d74ec4b16502435", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_runner.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63967e029ff266796082e00ef8263369f5a684b01213308f62d35be1d8c65926", - "format": 1 - }, - { - "name": "plugins/modules/source_control/gitlab/gitlab_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff0e35d6b34eb457ba640265b41f35bb6fcf335328eb3155f6e3318f12067dd3", - "format": 1 - }, - { - "name": "plugins/modules/source_control/bzr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "127a4d24fb7ecd0ae8286c7f1eb5332ca2e3217e7ac29ed85c1e814eb7cfeebb", - "format": 1 - }, - { - "name": "plugins/modules/source_control/git_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4df0f064e3f827b7af32547777bec982cf08b275708cd41bf44533b57cfefcb6", - "format": 1 - }, - { - "name": "plugins/modules/source_control/hg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "142f44f349abfc23bfda7f9f2df47d160f2a97446d7d5d31749fd5eab7adab37", - "format": 1 - }, - { - "name": "plugins/modules/storage", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/storage/emc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/storage/emc/emc_vnx_sg_member.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"bdf6c7c0da78522f40ac8678ad94e2088374f137927b412b36c5b538fd257453", - "format": 1 - }, - { - "name": "plugins/modules/storage/hpe3par", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/storage/hpe3par/ss_3par_cpg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2be10ff6aa61f598720d6ca0a1668a5ec6033680223fa3d3231192f3c12006ef", - "format": 1 - }, - { - "name": "plugins/modules/storage/ibm", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/storage/ibm/ibm_sa_domain.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "846c2e2161c51130505d8caeef87178eb8cd40b5fe42d9f9c6649b444f0d7c7c", - "format": 1 - }, - { - "name": "plugins/modules/storage/ibm/ibm_sa_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "42574cb0750d740dcbf3dc300cca235b15a22ecb00f79af5aa7818a494b60366", - "format": 1 - }, - { - "name": "plugins/modules/storage/ibm/ibm_sa_host_ports.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc5ac76115dfd50d5b8b37aa9de8c75824e6354a4aa925a171a364dd0fe60fbb", - "format": 1 - }, - { - "name": "plugins/modules/storage/ibm/ibm_sa_pool.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a1d51e21c6dc90ebea2e67c86200aa7c28b8451bd09c35cabdd5d53123cc1b35", - "format": 1 - }, - { - "name": "plugins/modules/storage/ibm/ibm_sa_vol.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44582854ca8e702de67f555704e9d3b007ece65d723bb24536a567e9e7031757", - "format": 1 - }, - { - "name": "plugins/modules/storage/ibm/ibm_sa_vol_map.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7a90662d294fcc853121b02134446a6ae10c430a5caf3ebc0766de0cbba6479a", - "format": 1 - }, - { - "name": "plugins/modules/storage/pmem", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/storage/pmem/pmem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "87f561ffee94533db91e813e348569aa7f44c076935e43430268f62a5ead5c0d", - "format": 1 - }, - { - "name": "plugins/modules/storage/vexata", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/storage/vexata/vexata_eg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fac270b3db28c9f8b6d24d299e753c80f9d251dbbdcb386a319097c17219a80d", - "format": 1 - }, - { - "name": "plugins/modules/storage/vexata/vexata_volume.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a6377d7306fb5a11f52aaa9a89cff909e8028a7cef71959eb6a7135ba1561d4a", - "format": 1 - }, - { - "name": "plugins/modules/storage/zfs", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/storage/zfs/zfs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0d5c3365e12bd96290f24b1ec13e5161e61f505d07110e03ff58195397373516", - "format": 1 - }, - { - "name": "plugins/modules/storage/zfs/zfs_delegate_admin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3109f4627ebfb5190204f57294c84ad0d54197c99c3a001b1f69f5291124490f", - "format": 1 - }, - { - "name": "plugins/modules/storage/zfs/zfs_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "18a3b38a9f47f4f5112579b819de7d664e0b55db8995743d4eac364579af5e2e", - "format": 1 - }, - { - "name": 
"plugins/modules/storage/zfs/zpool_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e26beb9afe4a1cbd3b2a05eec94c61ee16b586db9985c962f09c76c15f80883c", - "format": 1 - }, - { - "name": "plugins/modules/system", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/system/aix_devices.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "977386dee01ac51d9c885ecee657e0a24df1b5de87996f0a9c9f8c3d0605c08a", - "format": 1 - }, - { - "name": "plugins/modules/system/aix_filesystem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "292ff33ccfbcaaf28dc4cd67f6b749dc6b06ae1aa72db436245d348946c19bf7", - "format": 1 - }, - { - "name": "plugins/modules/system/aix_inittab.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e4b6091b24210a657d58c1767107946ecdf34f90cef0460762144b8cf6d4cd2", - "format": 1 - }, - { - "name": "plugins/modules/system/aix_lvg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "633b5243b9ea9b21d80f381a9698f140586e3a39310d21fb83ef8b5aa0d350cb", - "format": 1 - }, - { - "name": "plugins/modules/system/aix_lvol.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "feb995da59928c227261390532e549999f7a27594f09744529878c91b72e7bea", - "format": 1 - }, - { - "name": "plugins/modules/system/alternatives.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "507ab83ed8cc3718318b5de58d67eb743ad0318eab406441eaefd01a5eb18dd1", - "format": 1 - }, - { - "name": "plugins/modules/system/awall.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63f6d1714ac308da87c08e54b17fc2205f0bf2426d26914061074317ae835b8c", - "format": 1 - }, - { - "name": "plugins/modules/system/beadm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "07a418d4d0b40c72721627f7c49bc9f2e6c780247e9f101bfa57c79bf18bbf6f", - "format": 1 - }, - { - "name": "plugins/modules/system/capabilities.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7d9e46ddf9acbb7caa0bf526654e9b199abf60e253a551d9f10c4e4673fd6713", - "format": 1 - }, - { - "name": "plugins/modules/system/cronvar.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "14583a0612a939471168bd5d59e7edac48bb01d024aa0d0fc7cdeffd0e923178", - "format": 1 - }, - { - "name": "plugins/modules/system/crypttab.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d020cd305a432f0da349b1243d96fba57a3290b456016dbf7480cf6ca3dd9e92", - "format": 1 - }, - { - "name": "plugins/modules/system/dconf.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ca342ed1e3cae2da6bc5ee31e05db30f23344f75e4c68a06f577d24ddde2347a", - "format": 1 - }, - { - "name": "plugins/modules/system/dpkg_divert.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "83eb8748719f999e73a1e00bddc2ad0c4fcff0da7d1771feba9e7d1402f260dc", - "format": 1 - }, - { - "name": "plugins/modules/system/facter.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e9dc303791af31b7355e612dcde7b32ecaa6083514c401a900c1bd6c5da5c616", - "format": 1 - }, - { - "name": "plugins/modules/system/filesystem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00db45139f32500f03fdb8b276664e856ee2bbd3e48e225d0bc5d3ab0adaedc1", - "format": 1 - }, - { - "name": "plugins/modules/system/gconftool2.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"6e5a59c14afe686e07a8595a7f102e632ee78d2dc90749bd147e87b8906ef113", - "format": 1 - }, - { - "name": "plugins/modules/system/homectl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b770717fcdd6ce98d6b74d1d050fe20ab9278e7a4d2862882afef34ed3938feb", - "format": 1 - }, - { - "name": "plugins/modules/system/interfaces_file.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "25e134950671398223e77965d70780612354f1f321ef3b196377b8fe734adb03", - "format": 1 - }, - { - "name": "plugins/modules/system/iptables_state.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06358c739fcc70ba79d43af924c0f35a6920d8c5bc4292c14f96dd5870b8d4f7", - "format": 1 - }, - { - "name": "plugins/modules/system/java_cert.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5c40619fd173dfc758e1dbe6ad2083a924a6b138592fb98244b3d7a152dbbb54", - "format": 1 - }, - { - "name": "plugins/modules/system/java_keystore.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f2b9a344962a24cc2754aa948d60b383fbb21dfb7be36fb4cf2582fdfd896cd7", - "format": 1 - }, - { - "name": "plugins/modules/system/kernel_blacklist.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "22cb952a459ea253cfd9eaf5d6612dabe02cf670385d9a95e0ad8212b8496b1c", - "format": 1 - }, - { - "name": "plugins/modules/system/launchd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "287f7a5a7c8d859038ca8c15e7d221a1bce7c56b02942260f135b52229e177b0", - "format": 1 - }, - { - "name": "plugins/modules/system/lbu.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7471d902ef679d8cc8dbeb52b2f737758d696777c83c36332214a727ab7bf1dc", - "format": 1 - }, - { - "name": "plugins/modules/system/listen_ports_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5966c7c49a2850b1c13757899a6bd5443a30319f0b6f2628077662fd703df5b5", - "format": 1 - }, - { - "name": "plugins/modules/system/locale_gen.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d79413b262062855f9e4d97f7fefebbf5f18504e8d36da6496f20a0626c7b8be", - "format": 1 - }, - { - "name": "plugins/modules/system/lvg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a797ac328651f2c55e0e3f4d09629095014390bd99b82971aa1fced50249177f", - "format": 1 - }, - { - "name": "plugins/modules/system/lvol.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "faa2fddec92f0bebc7a4536cb716748cadb99d57be46e04faf4f14cb43958e86", - "format": 1 - }, - { - "name": "plugins/modules/system/make.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b119a10b4ef68686d49cfad00d5c3f4cfec954bce9f86dacbd5011fe2a746b9c", - "format": 1 - }, - { - "name": "plugins/modules/system/mksysb.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f4d453b498fb00531d86635f21b89e9da427d17788a8dffd624a7eef2d64260f", - "format": 1 - }, - { - "name": "plugins/modules/system/modprobe.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3d587d82af8364836d095369488fd76b90dea4f4bf068ac96984f50302fc7228", - "format": 1 - }, - { - "name": "plugins/modules/system/nosh.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b98560dd3abfba1dc2fe078a56a4eb93bdcb24af42ef6ee70c413dc7f1f9df3f", - "format": 1 - }, - { - "name": "plugins/modules/system/ohai.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4272be634bd89295c956ff2215715a967d299b5d1173048d0513cb45dc1f5f9", - 
"format": 1 - }, - { - "name": "plugins/modules/system/open_iscsi.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "307fc84c58937372a867cbf944d16e3a0606ea44e6699f5782c49c64f3957eda", - "format": 1 - }, - { - "name": "plugins/modules/system/openwrt_init.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "55836f6f5d1311011d3184178e63629e7b5a5bc28be88818944e5f8ef9ede13b", - "format": 1 - }, - { - "name": "plugins/modules/system/osx_defaults.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "91214ca6596b68554a16c909bb3e5d232b74218b55b9207102ed672ed70b14f6", - "format": 1 - }, - { - "name": "plugins/modules/system/pam_limits.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "87cc82831d55468a2c0d6d86970417652f0b6403b5f9c50ca6bb6d2e5560a294", - "format": 1 - }, - { - "name": "plugins/modules/system/pamd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "968da2701d4dcb58bf11fb374bc3ccbbc3060c57ca3881fdf8f6bff30f9a8ad1", - "format": 1 - }, - { - "name": "plugins/modules/system/parted.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9ed692725bcc6a521bfab3f2fadf1933e99cad99896ab3400c8264306e883e46", - "format": 1 - }, - { - "name": "plugins/modules/system/pids.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc2569182b41b994eba6fe7ff080628813b09e98c7ab70b9c10f236e6f33a01f", - "format": 1 - }, - { - "name": "plugins/modules/system/puppet.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b5fa5b7c452ca6ff19a0dec8516667e2afc31f5388fc822a92e20d4c144e2a91", - "format": 1 - }, - { - "name": "plugins/modules/system/python_requirements_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9fa050aedaedf5dd2693f4443418b780e5efbe06bf332f6b1fd675dec120ac6f", - "format": 1 - }, - { - "name": "plugins/modules/system/runit.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "72f3a3dfab5c5d69e79feb4564374076228b714b842e6606bebdc08317c2d74e", - "format": 1 - }, - { - "name": "plugins/modules/system/sap_task_list_execute.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b1fe8a9ff6fd21d93aa37a3bb40f875dfae6d25c2d5aedb6580197f77cb75ead", - "format": 1 - }, - { - "name": "plugins/modules/system/sefcontext.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "be1154ed383b3b642dff0e92276c0943ec2e7a5b875e7f16e78ee5764c1d8283", - "format": 1 - }, - { - "name": "plugins/modules/system/selinux_permissive.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52a988c4f8aa70cd2734333b75b7ec5977be80c272badca53a60df50f157458d", - "format": 1 - }, - { - "name": "plugins/modules/system/selogin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7424203ca02499f11893f07191e356ee4bf7a92f8c6c66f3760bb3662756bf38", - "format": 1 - }, - { - "name": "plugins/modules/system/seport.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "56ce94a493847ce43ad44e30af4bd87b816feeaa4ce15648828998b34efdb721", - "format": 1 - }, - { - "name": "plugins/modules/system/shutdown.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "02c339648349f7eaa4fc7b64c85ee8c40cfc98cda4c9b97879658efaf889f552", - "format": 1 - }, - { - "name": "plugins/modules/system/solaris_zone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "60a77ff20a8d31547321204ecb03e5962a99cb34773e9bb46cf25ecfd0ef52d8", - "format": 1 - }, - { - "name": 
"plugins/modules/system/ssh_config.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a1764d656d155306fa1c01f06ae71350613998bab940e036272a702ec2cf7510", - "format": 1 - }, - { - "name": "plugins/modules/system/sudoers.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4b125be575e79d2de7d840aef13ddf5ed40623de0f5e5bc74863e5a09610a5ee", - "format": 1 - }, - { - "name": "plugins/modules/system/svc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97cb8133ea514678200f8dd1d4041ce90327486c903143912d7995806c16457a", - "format": 1 - }, - { - "name": "plugins/modules/system/syspatch.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89c7d7ddd8731028bb3f5ea8426de2b5b8f19c0d2d9a0e6978aa67347be0540e", - "format": 1 - }, - { - "name": "plugins/modules/system/sysrc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd30445b5d09ca12cd4166dd59f204b4be4e0761ac8ddf7dd851a2d5026bcebb", - "format": 1 - }, - { - "name": "plugins/modules/system/sysupgrade.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c9bab43a8cc9cb85528181f72c9a881e6e53a39755461800aded2b3a27216c8", - "format": 1 - }, - { - "name": "plugins/modules/system/timezone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f762436db06c2b4085c9421b3e9a2337d1b65e1fce6663cc55e6d2efbe774668", - "format": 1 - }, - { - "name": "plugins/modules/system/ufw.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f0958a3686ca75540353eddd3148a6e4b19ed9b57bac7e6994e949572dd2a1fd", - "format": 1 - }, - { - "name": "plugins/modules/system/vdo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89c6c5018638467973eee8012275abf8a5f611a01cc073bc82ce583e52b3639f", - "format": 1 - }, - { - "name": "plugins/modules/system/xfconf.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3e6be01aa8dd20b6a1280caa636ea2321e0ce1635a39ca05517689b94716db9c", - "format": 1 - }, - { - "name": "plugins/modules/system/xfconf_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a5da7521b9e492203fa819ac907686227c1184a6ccb327c35a3b5e6b59b9e6e", - "format": 1 - }, - { - "name": "plugins/modules/system/xfs_quota.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27825f948b8481607c8829578da78f5b9030677cdf578304491fc9d6ca4f1348", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ad7213f9e7d5c8683f0a608a816f02f935bd3aa514be57a18671290391e7a44", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f7e66c06b83fec400b96810f28ce02f9d7c6c20cec8ebe5e321f163c318d8dd", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2c1649b50116c8b150ecdd4ca13c91bc52f49a22a57cd7aaec2d4c6125c0524", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "826a7d96e136504ae975e591e769dd5fdff2c96b59eaff5535dfeb43fbaf08d5", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fc5c40e788f2cf6dd4e82f618f6f37ea21e3ce497c640c49bfd9ec2ccdf234e0", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "29d9fe615e9c8b54a8bdac9ca4c4a0436ae3d3cae2972bae73df9fbb071072e5", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "399fa31a5fc8cfcf1a0f8fd944f7ca139446413e6fff5251083c226bb5274aa7", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "54ded3e29eec68ce76581b665af3228e58fe76211ffc3a392a890d42eac30289", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4dd04942dd16dae3c1e1de10712363b8cc67597db2647fc58d3a085c0a5d6e0b", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d39c2514f334eace3ce91c284d85afbaa6ce488b6dec69d7cea6689247fee56", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "da27864c36b0b1636bb1016f6623d38cc2685d9f1073d9023baf6650e2b5fbc5", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b3f2a4ee29a7fd7a468d7a4feaae37f0ce5d90fc963a91561feae1de5cd21f2", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "af35736343e2510d4ff9dc5ca4a01c3a6a17ae83685ea43381b8ae84190f1050", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/apache2_mod_proxy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d5fe445448cb9e4605eb0fe5c84e599ae353ecb8a256729b0510392d4fbbc4e", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/apache2_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4dbb4a1e3308a693aaa3101faa828015f66a6a65e040cf3a9a2eee417800d6b0", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/deploy_helper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d29a73dd509521790e2dcfde24498ea2967bbb5a4c659d26c8a91f41c1cc231c", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/django_manage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "be65d011c47d6222a81d1b82af3f9e2cd5853f174c60494cfcc1930009e315ba", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/ejabberd_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "92c3d42c1eb1126af9f9bb8c118c0a08f28f599c057a03a254b03e76b370614a", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/gunicorn.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"4c0fc574bc49deaa348708e90945d2b44c5ec61d22f3919022bdc67c105666cd", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/htpasswd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3a9e50c4e8fff4250f074d11041a587ae773629bc33fd8082a1c28c68c99c1b0", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/jboss.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "413a5203f4d159144142272b5e494f10d032d589d31b0d5167b60ab0e5d40664", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/jenkins_build.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a70f3860a8a4adf2ab17cc214be4812d8e72fae7ba2a748fbbbe9bb9755178b", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/jenkins_job.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "289f71c98eae7a1138cb3b922f1b7a431d3cf593ef838ff7f152c5ff60839a28", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/jenkins_job_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb90242a9999203cb2fa1d6af3e9a8c54ad57530e91aa338f00cee8fd7a4b32e", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/jenkins_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9f36ba039a959f4ab537e6736021dbb68c50ed10e7ee3eaad03307c5726155e3", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/jenkins_script.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "194b41bc5b511c44e15b770526dcb63625ec530b963e650343467f12b5a083ee", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/jira.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "072dfce83798a6ca7fb0c0395e8d8168ca28b140857ef73687bcfc04ebe00941", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/nginx_status_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3be0b85c00ec846e372cd74d28bef34f32211231f6c8cf45803285ff76320d39", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/rundeck_acl_policy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f5d8165b92c6995925b290f7956385d5f58e67db78fc5999a8d9fce2c8631a4", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/rundeck_job_executions_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "70a72bee59a76399bccced7e6db5b5079df984405f5e8f6c03aa077cf0a3954e", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/rundeck_job_run.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "11003889632bd0531f924dd291d0e9df1ccad0225e3e252e9dc33a258768c8b1", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/rundeck_project.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c2c34f541040b892e7f031487104db7ec1b0e1a522817e8308d586f9d503f6f8", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/supervisorctl.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a130a0e5a2402d2d964a069ae288d1faff9808d48f8b0f4d4a83a9fa55192ba", - "format": 1 - }, - { - "name": "plugins/modules/web_infrastructure/taiga_issue.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3f0162389f24357b7981000dc718ef8a794b260ef570753703bfa372d593583", - "format": 1 - }, - { - "name": "plugins/test", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"plugins/test/a_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3e1af0bd0e7fb21c5640786d6120056f5dcec24748713cd7b1bf332aef1818b7", - "format": 1 - }, - { - "name": "tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/aix_devices", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/aix_devices/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/aix_devices/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5926739106235917ed4672c00a9b356ff7ef3016b826d8d0976c65c5b705288b", - "format": 1 - }, - { - "name": "tests/integration/targets/aix_devices/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2fb2fe1de7acac9fe00bbe9918b6ec663623abf8938099a8f7b41505d703db55", - "format": 1 - }, - { - "name": "tests/integration/targets/aix_filesystem", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/aix_filesystem/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/aix_filesystem/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a7547d84307d4170efbf1815ffc8bf4badd3e70c285bca90255b2aa80c004758", - "format": 1 - }, - { - "name": "tests/integration/targets/aix_filesystem/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "854b5b1c0dd3a694bcd987ad52daa4a2a5e87e241145505fa364de304e830277", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks/path_is_checked.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "055494791cbce8c13c3229b97afc4d57de0d7abf31cee3684b6cab1f41265699", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks/remove_links.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c76d52c3ceac3df1588b8ad3933e040ac9296bff57bf8ac32ae533eedf36453b", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b882d2293d0e0475c3f0a1d940a9d96fea2a5c377e3b8579f634fad461f6909f", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks/setup_test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a7a37539eeb0d2752094ffac9773b106db1f090125ed4ec38213915f7b623e7c", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "3724e47d997d1bd51e38961daa366646ec705cef09cd62a2948136e8dd2cf84d", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks/tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3d53e7fb88aca511c4dec5f6993622b07eb75310dd302203bc9d128d2d5eb9a7", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/tasks/tests_set_priority.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d6a461fd274973c212b83e56e5d7a197e316a582e9ae6e85547476e302494505", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/templates/dummy_alternative", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fa719c49691aabd3b22160f0b5c64afcb002e65dc718e33989523be08faf2971", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/templates/dummy_command", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8af22a87ded6536dace0aa9e546372b01191d6ea52e9011cc42503d4f8216e0d", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/vars/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6d955d8a80b9d85aab9779d3598143d9a97f02d3987637307bfa69cdb599f844", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/vars/Suse-42.3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e2b52f4afd41f1c28b2c48fff66b165191525fb9ebaa825b3e104c98457d540a", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/vars/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "061cd989ba91f9bcaeb0c970b22d2aa9e2cf13a07d1e03f9074ddbe9a874e0db", - "format": 1 - }, - { - "name": "tests/integration/targets/alternatives/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "680dff9a6433cbeb4ff95620592e73d53b323a4205d09e030ba29a479c347587", - "format": 1 - }, - { - "name": "tests/integration/targets/ansible_galaxy_install", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ansible_galaxy_install/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ansible_galaxy_install/files/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eaee3ad37e63b6f3e5453cae5e6f3c20ffb8cab3973992d47ebc0a5e187577fc", - "format": 1 - }, - { - "name": "tests/integration/targets/ansible_galaxy_install/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ansible_galaxy_install/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/ansible_galaxy_install/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ansible_galaxy_install/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2a2bdbf4bb8031c938a4df332d7e01fcb66976aadd532c31e876fe1d09ab411c", - "format": 1 - }, - { - "name": 
"tests/integration/targets/ansible_galaxy_install/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3bd265e7abdf487e7c1979bbd71847984a4d82c60f714b417b5fae98c007967", - "format": 1 - }, - { - "name": "tests/integration/targets/apache2_module", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/apache2_module/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/apache2_module/tasks/actualtest.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "30258755b2ed6e44a697865a85bed3e8dcee2b51dd0b3ac6cce0da6b0b668074", - "format": 1 - }, - { - "name": "tests/integration/targets/apache2_module/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bac2332e517b1b916557f9943bf76a536856a58afc6213c77edde4682c07c8df", - "format": 1 - }, - { - "name": "tests/integration/targets/apache2_module/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b6e6b3eab89eec157e047b733c9e9c8b2ae7ec87e514ef9057018fee6fca9ba2", - "format": 1 - }, - { - "name": "tests/integration/targets/archive", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/archive/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/archive/files/sub", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/archive/files/sub/subfile.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/files/bar.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "08bd2d247cc7aa38b8c4b7fd20ee7edad0b593c3debce92f595c9d016da40bae", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/files/empty.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/files/foo.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b6a5ff9795209b3d64cb5c04d574515413f9fec7abde49d66b44de90d1e0db14", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/archive/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/archive/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb260b9fc93fc4d46a44bd82dd8cd91dece50419b0333ae0720eb7a794c02d8a", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/archive/tests/broken-link.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "21d26e66620ce3569910265c7a27bfa52768988267f6bd979ff5bd4cd6f87396", - 
"format": 1 - }, - { - "name": "tests/integration/targets/archive/tests/core.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3987b41809217f072800b851ba8f19a00ffdc1b7e4508c9b497eca4a4e9e1438", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/tests/exclusions.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e9d1430f00d9b6cdfd6a57f35df47ba60968cded33565c842f3dcc517a2bbb5", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/tests/idempotency.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3ca1627a609eb62411e3c3e6936b0511e17d1ab66bb6fac4afb3b48d0928f3a", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/tests/remove.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "20c87205bce112a015d7a53bc819492892425783446f8ffc7c599fa43fc8cac3", - "format": 1 - }, - { - "name": "tests/integration/targets/archive/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06d75688d37669db031d0d5d90607d660a646b524ff85ccefd059bd69b9fb352", - "format": 1 - }, - { - "name": "tests/integration/targets/callback", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/callback/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/callback/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "122d3dc3384d2031f9179746389f7641fd35f9bdb31a062613670f8586f2a5bd", - "format": 1 - }, - { - "name": "tests/integration/targets/callback/inventory.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "74bdaf35b547d38d9a2d81fb57baf2ff9fe88525b0de1cac491ce9fadcdec6c5", - "format": 1 - }, - { - "name": "tests/integration/targets/callback_diy", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/callback_diy/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/callback_diy/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ade8d31897eb82d321318493957dffb3422b03c3ef58e953bd8ae877ccce3e23", - "format": 1 - }, - { - "name": "tests/integration/targets/callback_diy/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2aa69e858ce545ae65624bea04459e7be706c4c3f1014e0a5408501b064663fa", - "format": 1 - }, - { - "name": "tests/integration/targets/callback_log_plays", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/callback_log_plays/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6", - "format": 1 - }, - { - "name": "tests/integration/targets/callback_log_plays/ping_log.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc5ca975d2e3a0c4289c613b8a1187f1bac1274cf1a96b13d0bf47bf2fb7443b", - "format": 1 - }, - { - "name": "tests/integration/targets/callback_log_plays/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad703f7bee42e32b4bfdc9f79d91bb3e604d12319eed16bbe8c575c691c46290", - "format": 1 - }, - { - "name": "tests/integration/targets/callback_yaml", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/callback_yaml/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/callback_yaml/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d4e464262798abd7e2d15aa116626e70ef0e9a77bb7eb2b1d9334f2ee4e14e0d", - "format": 1 - }, - { - "name": "tests/integration/targets/callback_yaml/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7d128604afccb61be3dd2a49ccb142879c91ab4786e985688cf23aca7c019b16", - "format": 1 - }, - { - "name": "tests/integration/targets/cargo", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cargo/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cargo/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837", - "format": 1 - }, - { - "name": "tests/integration/targets/cargo/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cargo/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "349f906e66c0d40d5cb80ffb896f006fc4bfd3ccd6883962bc0119138ae58026", - "format": 1 - }, - { - "name": "tests/integration/targets/cargo/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a412dc48a16f026307565acc48f4d2f764e7b3ac7d9d7f6d4b46279bc412979a", - "format": 1 - }, - { - "name": "tests/integration/targets/cargo/tasks/test_general.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "47442c80aefa08b4a44ec27d445c0ff81c1634760c8b12ec22dcab80f3233262", - "format": 1 - }, - { - "name": "tests/integration/targets/cargo/tasks/test_version.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "37c908993016702c0a0b5e770b82af6aa00a676624d37db6c720c1b9d9951d3e", - "format": 1 - }, - { - "name": "tests/integration/targets/cargo/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e8d3dcfac4ab53e9f978dec321326b28e47cd068b23d8d6fb33beda52f87e791", - "format": 1 - }, - { - "name": "tests/integration/targets/cloud_init_data_facts", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cloud_init_data_facts/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cloud_init_data_facts/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/cloud_init_data_facts/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cloud_init_data_facts/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3633cb76ff657891fec60cf58e6e07a9e08abf697fc54d705ecdd10820f42ec4", - "format": 1 - }, - { - "name": "tests/integration/targets/cloud_init_data_facts/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bd70b4562a8f5f0b0b7166b6815afa23318c040d3ede1055aafe92b302a09169", - "format": 1 - }, - { - "name": "tests/integration/targets/connection", - "ftype": "dir", - "chksum_type": null, - 
"chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/connection/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e084a3683ef795d1cdbf5e9b253f2ca1f783ae0d0d6e47e419acbbc4fc80bbfa", - "format": 1 - }, - { - "name": "tests/integration/targets/connection/test.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f32dbff55de60ace66e2555586b94abd0f74f6bbcc008eb8d1c25dbfcc464a3e", - "format": 1 - }, - { - "name": "tests/integration/targets/connection/test_connection.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3297fe2040e5b0c523fd6f14bc0c56a886980c2a1b241b93bcce847958528861", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_chroot", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/connection_chroot/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_chroot/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d4961f0b8634e5a5e3f194d624d8ff66252900892b32fc46c12db712aa1eb43", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_chroot/test_connection.inventory", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5eb84ac30158da1476995439d5c07afbaa95553857727aaf5d68f734f706607b", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_jail", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/connection_jail/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_jail/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_jail/test_connection.inventory", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e68e68eef9de19ce29e372127ec2ff42dfddee8af5934e1c6785b5896d540681", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_lxc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/connection_lxc/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_lxc/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_lxc/test_connection.inventory", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "840bf39cd3675cc46dd72ede6b17ecb9383f97e085b0c194dc33b841213aa886", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_lxd", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/connection_lxd/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e", - "format": 1 - }, - { - "name": 
"tests/integration/targets/connection_lxd/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ec1a8d284cdd3ebfbde0cfecc54f6852263cd47f652c6b1a7bfc1d874fdb6c18", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_lxd/test_connection.inventory", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44f89257fbaf385b7b113c10a6e47387221ff1a6a851bcf322dfeb55563a3be6", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_posix", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/connection_posix/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ba6bc4b7b7f06e33b61092629dbd2f094b2d814d5cb051650b7494031fba6bea", - "format": 1 - }, - { - "name": "tests/integration/targets/connection_posix/test.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e", - "format": 1 - }, - { - "name": "tests/integration/targets/consul", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/consul/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/consul/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0d7dd9e426571c0334ab74bf3c78984772b5478d423fd107c01c504bda6ddb22", - "format": 1 - }, - { - "name": "tests/integration/targets/consul/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/consul/tasks/consul_session.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "db366a4d270e5c06f6202377a2f560685268af6413ae761ea431135b9bdd595f", - "format": 1 - }, - { - "name": "tests/integration/targets/consul/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "33d18fb680c081d59db5d298a72375481db915692463cffd414b51139b0b9f9f", - "format": 1 - }, - { - "name": "tests/integration/targets/consul/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/consul/templates/consul_config.hcl.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c7fa41289f466e39fa1fbb01f05ca4bc39b073d6c96bf35c225c9b1ccc7d61a6", - "format": 1 - }, - { - "name": "tests/integration/targets/consul/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a40969a414a8a84d59746ad2ec3a1b2b697443e715086c01925cc8a163b7aa1a", - "format": 1 - }, - { - "name": "tests/integration/targets/copr", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/copr/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/copr/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9a9f0d9e896761785cfcf1ea2c50cbe7d5e2961ade059cecd549358c050c8faa", - "format": 1 - }, - { - "name": "tests/integration/targets/copr/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ac6f5b8ebe7c5304eaab8f102721e9b25fcb18ff1e35d57e205f2aaa957c4536", - "format": 1 - }, - { - "name": "tests/integration/targets/cpanm", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/cpanm/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cpanm/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/cpanm/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cpanm/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1b8163e6b73737e36aa69350e111ee82450f019cee73ec5bd3e6d38393e39363", - "format": 1 - }, - { - "name": "tests/integration/targets/cpanm/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e11285d3c24d91036c84ac9d685ac14affc9303e91d564aa4c61df012b718d88", - "format": 1 - }, - { - "name": "tests/integration/targets/cronvar", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cronvar/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cronvar/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0c6d87783ce94ef757b0c55298b080c8f50f00fe51344baa8d122bbcdbbe5cd1", - "format": 1 - }, - { - "name": "tests/integration/targets/cronvar/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cronvar/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ee06f343a7d6949952623866c8845381ed7fb0231f32e94aa8e780c244e38d8a", - "format": 1 - }, - { - "name": "tests/integration/targets/cronvar/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/cronvar/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ce10d7cd586ffbc352d18965bbc84ea8fce37fcfc20e049ee4ce2864841eb75f", - "format": 1 - }, - { - "name": "tests/integration/targets/cronvar/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89574b4b847b9f3856aa58220ab1df26bf11517abe25b683576fd1308102b3ac", - "format": 1 - }, - { - "name": "tests/integration/targets/deploy_helper", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/deploy_helper/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/deploy_helper/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/deploy_helper/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/deploy_helper/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46ba8553a45e25a89033ed888edf302e8009bf1b3d577f62a37efdf2e33836b6", - "format": 1 - }, - { - "name": "tests/integration/targets/deploy_helper/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": 
null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core/settings.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d7461ca3b1cc9a25a907ca8c9c21ddd3340bf5848de26df4a98c8eac2ec45572", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e988f7c59ffd15b88919bd673beff2599b04f1501dd7e93252143e7f7898ddb7", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/simple_project", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "85e294fc5db7d14dd9083417878601839d56bb166fbcc75d2a294c30e631b493", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "535fbfbdfd57ab9ddb8f78425432836748d1a5551c8301c56b25fae52da190ed", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "16f347ddca8f37252548a0c35294400ae4592e77fe03367d2da266c4d10be3a8", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/startproj", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/files/base_test/startproj/.keep", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/meta", - "ftype": "dir", - "chksum_type": null, - 
"chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/tasks/main.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "25d9fc3b48e329a06519472febf874f365edb4dc19d9747deec63d874ef8274d", - "format": 1 - }, - { - "name": "tests/integration/targets/django_manage/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2688bf770b1ab976b4c5ecca934a17ff8137e9f2e8e96798f9bb5f2ceb3ec99d", - "format": 1 - }, - { - "name": "tests/integration/targets/dnf_versionlock", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/dnf_versionlock/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/dnf_versionlock/tasks/install.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "480c3223f2fa65f6e3a2d7b99a6a8382d240d8a39a1748e9b6f2e264a70b856c", - "format": 1 - }, - { - "name": "tests/integration/targets/dnf_versionlock/tasks/lock_bash.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "166c2465c0ddb12d8e6fef61a0719a5e011701bece7d59c897d68da2e101cfe9", - "format": 1 - }, - { - "name": "tests/integration/targets/dnf_versionlock/tasks/lock_updates.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "93f4f61d394703660e7b8efb7fdc4185ab7f579cee353e1be2928b6467efd0c5", - "format": 1 - }, - { - "name": "tests/integration/targets/dnf_versionlock/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a046156eb25b05dfca2a5f810ab354067a4cc34f003deb0a5e383eae1e110423", - "format": 1 - }, - { - "name": "tests/integration/targets/dnf_versionlock/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7366ec1af83f8212f093362620022f4a3e9d9de63b38c75466df933839cb1138", - "format": 1 - }, - { - "name": "tests/integration/targets/dpkg_divert", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/dpkg_divert/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/dpkg_divert/tasks/tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "995e695b188db7961d7f3de6fe4270fcacedeadbd3665a90361a984d818188a4", - "format": 1 - }, - { - "name": "tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f287a9a4d91cb6207959a2d7c577633a894efc6c74b65972e2eb0d3012792b7", - "format": 1 - }, - { - "name": "tests/integration/targets/dpkg_divert/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "942f62d745fd5f95f3b5ba8c7b2c7eec4b6bbfbead87f342f75c5bff11680fc3", - "format": 1 - }, - { - "name": "tests/integration/targets/dpkg_divert/tasks/prepare.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"4a492d008b23484bbe7c6cf2e4af199e619cee6e3f1682e150593d8dc9b8c8f2", - "format": 1 - }, - { - "name": "tests/integration/targets/dpkg_divert/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aabd5fd2d095f61a8447e26e58c1b1ff4f83adc171edb393a8ebcec7222ca934", - "format": 1 - }, - { - "name": "tests/integration/targets/etcd3", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/etcd3/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/etcd3/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b3065c9d434c22b1009b808c53c5221d9e2f8c201f58d4a71fff2db06cf72a27", - "format": 1 - }, - { - "name": "tests/integration/targets/etcd3/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/etcd3/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "595c566e59d35e9f088f6df364a86f831b79150e7d7dff03d947a0fc61d1e773", - "format": 1 - }, - { - "name": "tests/integration/targets/etcd3/tasks/run_tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "efae7ddb7f13fdd93ed0502750a0efce2e042137676e429f22097d8ffbe6aeb4", - "format": 1 - }, - { - "name": "tests/integration/targets/etcd3/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c410d3272c7c8f07c20809aba1af5eacad70c842e5d15c825ca385ac455fd3a9", - "format": 1 - }, - { - "name": "tests/integration/targets/filesize", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e6209c72ec965475382d3b00ac2a4a421ed6a47d70bcd5ed140aca2d199f7e12", - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/tasks/basics.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5ce84cf330b54b496a7767995179c97490b4fe1a7172ce98c118da2d30a830ff", - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/tasks/errors.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57228fe0f877d6dc8a7ff5f4041bffc02bb90edb8393587ed0d3ef371c2c21ca", - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/tasks/floats.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3b9570f154c40f1775c1318cd68627d57aa0a8fdb4656d2eb420e4b5e0039247", - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "307c97bf5bb2d67ee752f2061a35d1550bf6c825faba98cb6b998890359b5bf8", - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/tasks/sparse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "01371d3811f5b3bcb688300ea0b4caaa90e03215e1c2843304c5be53c5d050b3", - "format": 1 - }, - { - "name": "tests/integration/targets/filesize/tasks/symlinks.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a9382c1cdff793fa48185fc9c6c8b05b2eb40b81264e27edd79d00c37ee5ecc1", - "format": 1 - }, - { 
- "name": "tests/integration/targets/filesize/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c77324654c86a859432f147a34bccb1ae7d77f77d04b8bb07aa7a3fcba7cc51f", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "55a70f7ff94020af40ac26fb36a89e5669f6ceb19447e7017b271c85c0e7e25f", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/tasks/create_device.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f592609a39ca48d4a140172efe623a76d27fbdd446fa3fa4b19e70c9eedc5c73", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/tasks/create_fs.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "22b57f1e04f2f4216b640d700c08066dd39d55b0018b988238567232764a3583", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/tasks/freebsd_setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f4f0c258e9f811c66369538c4b491ab192a65a58bbbccb5f706794cb150cdd33", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f9380a232b46cb9b9e6ab637e668f68d77aa2beace334b7b497f5d85669a717b", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2e4dbe2f4225d31cbd6d5de99d79eb25f05e39300aaca273f1288d699c032219", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/tasks/remove_fs.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d42286f60abec1118afeeab5847e1f680defb7f047b871003eb682d510f310ee", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7badcb1afe1eaa2ada350afa495def37bd69032abd912efc342ec89c2171cc95", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d6920cd18ed5a801036ffb67965b492631a80e36d9b800a3bc3ebe8712880c55", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/vars/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "efead6e1ff36b22afa53723f82f74b980f6ec2fcb899cb29be017b9caaaaaf79", - "format": 1 - }, - { - "name": "tests/integration/targets/filesystem/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"de4b021a7ffae564d5e878e8a94a4121b367cb3b51a9b0dfe2b08943cdafc492", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_counter", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_counter/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_counter/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e1780a85020ca1006bd94e0b0145f6d5b09568b384e2890e09b1f36b5e53e27e", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_counter/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_dict", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_dict/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_dict/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e61381cffc90f48823126efea33cb42daa5ec933b1df8fe1fe6b321d47db9b49", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_dict/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "28d887b76b2449e9952763697c6f74d113f0b9ab49a31fc19581be0be8b31e6a", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_dict_kv", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_dict_kv/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_dict_kv/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9aba3847ffd31a14704ebeccf574c11695676707f74bbab7e2aaa44698bd6483", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_dict_kv/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_from_csv", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_from_csv/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_from_csv/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a584dba9798fb151554dae8caaf4d1c03ed82c01d8df7564fa82dbe1fd03bd7f", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_from_csv/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_from_csv/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1277f87c8d45fc388589abc625eac2f462940208027ceea96cbe84a26958d215", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_from_csv/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_groupby", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/filter_groupby/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_groupby/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "891371439e27b1e84e453be7590313a2cfd91626c2cd44ec1c3e5c030a7c8ea3", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_groupby/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_groupby/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52327d9500a32db44b09e9a9303e595e83887287ee3d8ecdda41ff9f58eef349", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_groupby/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "28d887b76b2449e9952763697c6f74d113f0b9ab49a31fc19581be0be8b31e6a", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_hashids", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_hashids/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_hashids/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "61fbcd116a06bc7d0dc800980347959f668c5b4c90c9a3af553d11473abd54d9", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_hashids/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_hashids/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae63247cc858c2f35d9acb7fefccfc73791d0af43fbe28b33556f596c8c2f4a8", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_hashids/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_hashids/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a25a3a5cc05695c1300356e103adcf661d6d755279717a439c46343245aecca2", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_hashids/runme.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "baec4a8a3c194b2779731d613c67960123861a400a80958cd575145e22f09dc9", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_jc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_jc/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_jc/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "852b469909b319e64fc4f186cbfa812a7f72d389edf3a2b1deaa45ec1b626fe8", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_jc/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1f6109e8ddbb7c2c85cf0d51119d787bafc9d9695dd74bc96d3385fb0a1454d5", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_jc/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d1f42825061f9f041ecbd2e1b6c702a9329d8403692df09d23e4540e36391420", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_jc/runme.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"c028125bc1be296044e51b7bdcb04231cb566386c2f935871f0e40b4947eafcc", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_json_query", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_json_query/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_json_query/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "60214d35e8f2e5c325430cd80c635e9d690698824f6f957644bc8e237e1f9638", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_json_query/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_json_query/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00cfbed7e5d4044baaba437bc0d2ba800f28300042c01c53047d8a4894078eef", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_json_query/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fae4c68ed62f686121e77073f3d43160d927a876417feadfa3be71b063ce9fda", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_json_query/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a955c14afce0e328ea62f806a2d8a9ffdb7032fdba8e7bbcca41616aa3cdff19", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_json_query/runme.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "040ec78afb260ec34ebf5cb4b62cd2203fe9e579f33029d450a92c2f004d3545", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_list", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_list/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_list/tasks/lists_mergeby_2-10.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c901abb1d2647356f2ac233b9f6cf71de132368dc7af7f4e05bdf28fc2cfc6f", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_list/tasks/lists_mergeby_default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bc55b21ce77cedb27b721178bf96c11afaada2438be2fb0d32e33eea993a79b3", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_list/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "176fc3bd46d769a0e2f8a7dc2bb60aa48e9043c22139261dce82b1a25cbb8dc0", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_list/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_list/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "34dff693363be5fea786490fc443495aa7f392dbf14181b7ff3b1dc6ceaf0421", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_list/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_path_join_shim", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_path_join_shim/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/filter_path_join_shim/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2ea941e7f4e3e218323cd9ee178197e6cccc0cffb6e62f873e0415e9e498392c", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_path_join_shim/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd53f6f278376c3ddfc1f7e8f51c5d569be05463717d2f5aa6ab60ee03ab7513", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_random_mac", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_random_mac/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_random_mac/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_random_mac/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_random_mac/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d9493241922175b56e14798caf608b1567ec0f1d5f521e375bced0e97259a10", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_random_mac/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fae4c68ed62f686121e77073f3d43160d927a876417feadfa3be71b063ce9fda", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_time", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_time/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_time/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9ca76bc55b0ed8540c4f3d578884ef636c7b66c1a1cc089e9be629216e45df66", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_time/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_unicode_normalize", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_unicode_normalize/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_unicode_normalize/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c70a784f3b5c42df4e73ca0a58d524441364b5f2343e5a58a39895a7b302e2d3", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_unicode_normalize/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_unicode_normalize/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "02faaf48fbb2fa8d25c1eec1df2ab5c75e67646131c5738a66568c2083b1031a", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_unicode_normalize/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_version_sort", - "ftype": "dir", - 
"chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_version_sort/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/filter_version_sort/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7e92321cf5603bc9b08bc384259a7235d2329dcb42029600b75aa0076ce8c98d", - "format": 1 - }, - { - "name": "tests/integration/targets/filter_version_sort/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/files/serve.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "635842d29f27e7e27f969969bcabbb76807a7a1e8f092b834f10209acfa253f6", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d1f270e41b71c8ccec217bd5e5c397cf527c856657a81a463eaae0b06130f9c7", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/tasks/check_mode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d01f61ca0d7b950f29ff070e7f169561e718a19dbb68c9deb8ac0af480a005fd", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3ff8e182f408bb9b8f0785a037ebb41e314bf7083acc2fd7c579cf8dd643b049", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fd7f915a42855db32f162d62d04d7583aa298d2d494388888cf341393043e374", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "835f7bf8121c4b787856213e3f218dabfbe703aae6cc87bc4b4aa9dec6f53df1", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4e8c4d4bf2055db6c7160889d246b648084fa9990fe4b6ff4197e30ebfc62b42", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d1f270e41b71c8ccec217bd5e5c397cf527c856657a81a463eaae0b06130f9c7", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote/tasks/check_mode.yml", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "467e3aec9b6edf845929a1690aba9762b6b7add57207cb503169aa9874705540", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d33778878cb7d90135519335485e691d5f31240507136a7bd6322a218eff7b51", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "173148e596fb6f57b0b03279f0523b75b1ca7d079c9705c022100afd5fd58d75", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4c7ab224966913a081cb0cb96619472eb6d5caa5afdcb44c219872027da9451c", - "format": 1 - }, - { - "name": "tests/integration/targets/flatpak_remote/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4e8c4d4bf2055db6c7160889d246b648084fa9990fe4b6ff4197e30ebfc62b42", - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2541cfa23d346daeb3fa38ca215ddab183bedb0e62d95f8e71e6af9c98973bb", - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/tasks/create_record.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8474f529fce8a5c495b4779c788326a97c91ac317789dabb68fa009c67d97445", - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57b334769ad3dca857cb9dbef6ceea4269fb571fa4c578930e83fefa5159ed19", - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/tasks/record.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1fc4e933f7c9cab8cb023644431912b892d65ca290fad2a3201f100748609a59", - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/tasks/remove_record.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb5a0a923ae8f004baad260ce26fe58901c536b0b6352c6e9420bdba6e474bae", - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/tasks/update_record.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "48ed2f4937679170c6eb87f79e40a0749a61bb738718cb8d2d9f10fd4e4a0970", - "format": 1 - }, - { - "name": "tests/integration/targets/gandi_livedns/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4cc754ef899091a5d995b8f57081212b5c66a93c14811173ddff5877c14bc544", - "format": 1 - }, - { - "name": "tests/integration/targets/gem", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gem/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gem/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7", - "format": 1 - }, - { - "name": 
"tests/integration/targets/gem/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gem/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aec0ba5bc5fd1823b16807705eaf33555a705fda2e116d21846eaa79b0ce1448", - "format": 1 - }, - { - "name": "tests/integration/targets/gem/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gem/vars/FreeBSD.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1bfdff73d0b467666b98caf7ca46c8ae4d1f4d5a21c08d83484718ff3a768a9b", - "format": 1 - }, - { - "name": "tests/integration/targets/gem/vars/RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e539831c03045b807895fbf0f8da5f13e1d3a6a9aed78d1f3946a0fdf3678359", - "format": 1 - }, - { - "name": "tests/integration/targets/gem/vars/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "37a9c91c779fc3095ab2e480b350e360bb26988d8e0bd3b8f546ce8b538b0f8e", - "format": 1 - }, - { - "name": "tests/integration/targets/gem/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "de4b021a7ffae564d5e878e8a94a4121b367cb3b51a9b0dfe2b08943cdafc492", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/files/gitconfig", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d709a94ee4ce2ab24b5344060dd6553680071da85b7f216511712d365086c68f", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "19413ce550186861d20f0ac1ffd3b060b447f928a7f07d993abde073258d6b57", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/get_set_no_state.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b8bd84dbf2ffdedcfe984e192ab036e4486033b782311293bbd9e0ad6fa349b7", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/get_set_state_present.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "43513de76f25e00c1b85cbe49f237c6f93e00d4d8f4524ad7eaae8be5823b687", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/get_set_state_present_file.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f92ed3092d13cb1bdb07ef0c98e7eb284ab9cd94d44c5f360ab896d8d0367e9b", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e51f44e6962edc1796f3c57cf9036edf0a3b5d5b7da8e6d252f107268c61816f", - "format": 1 - }, - { - "name": 
"tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a6ef0240d167a91fa0cfe6c5e508d29cd22fe026743b34183851f9ce0f0b8193", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/set_value_with_tilde.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e9b96f0949d8aaf9116677176161c6b10432979a01b844b15c3dc520d4a1185f", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d202854e0c11a186e10df5e9d10fd9c4f2b6496b99a96c54bbac94a6e5a99e1f", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/setup_no_value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "42a2ba7577089c90c9ab133845c765bc9fc7bc419396a85d6a6d13be068b0b20", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/setup_value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5d3c152303bdc4d895e5cad7b21ac2fbf74e0511def7383a6baf449618404532", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/unset_check_mode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "70e35d7994207a6ef6575db2a3bfb2ffc3fbc9abbe4e037ea2718da576a11468", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/unset_no_value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1cfd693a2fe6a99cc06e5fb9eaf6cf41dc5fdcb78f9a1d775584e1868b979dc2", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/tasks/unset_value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "efd37305a713b60bb4da0dd8b714d26c061fb3a23a01b6fa188a73fd59e1425b", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4fa6677756ae0c25fc29eb8ee5b7e8cd9cb31b303b6ce8bb053a96f57c1bd874", - "format": 1 - }, - { - "name": "tests/integration/targets/git_config/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a2ba48cd370e3f4846785bd8b11b3de27d24f9d47e94b17659aba32ed054bd38", - "format": 1 - }, - { - "name": "tests/integration/targets/github_issue", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/github_issue/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/github_issue/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc974ee4cab09ff69efa27672a00535d8dfb457584857c7ab320610d8d072bd9", - "format": 1 - }, - { - "name": "tests/integration/targets/github_issue/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/github_issue/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6c3e159cbba9dc6d749a2b27f310a79e73dd79f8be491192c16864e709c12814", - "format": 1 - }, - { - "name": "tests/integration/targets/github_issue/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", - "format": 1 - }, - { - "name": 
"tests/integration/targets/gitlab_branch", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_branch/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_branch/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3c343d7bbf23d85e04b47eb5a166ae794148dd7a91ea21e9f4da540c0f4fd0cf", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_branch/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_branch/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fcf47339825c9d5962b27534871fbfedb2b363fa2590d562a4d90c655d698db0", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_branch/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77f2ba9334ca23502cad6112f276bf4b73433d1d77f816af6860f85055374aba", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_deploy_key", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_deploy_key/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_deploy_key/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c1e7460fc001a7794a63a5ff0df53ebf4bfbc01c07aa8dc5f4668a0981bcba64", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_deploy_key/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_deploy_key/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0542fd9f80a7fc8960372206b7c61b632f4592b2f7672d852d9aa8a88b9aa168", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_deploy_key/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4b4c1cfacea788e6b9ce76d8b4d1d3c0bacef30ba57e5af64624c72163c12509", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "644365c3bb994e4e94f0b184639d90e1d61e201f44f873aa69677cb0d81b2407", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_members", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_members/tasks", - 
"ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_members/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c1cf2551973b392d2e4561a3863809c6e905e898e594486edfbde0ddb0ef2ad9", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_members/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_members/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2c3a1b5d2012b8506e5943d3ffc37dcce4396e8a3d605c9a2c3817ec6b18fc61", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_members/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "60945d49535300be8e42108658dba31fcd5d665fc40d6f186798e7e0682320ae", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_variable", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_variable/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_variable/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fa1ab4cf78f7a90c216925686d753f7eaffb53e3882357404f80eec20d5e6d5b", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_group_variable/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_hook", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_hook/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_hook/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8ed33d526fae3cdff057b6022f77a7c4d32b7498112efaa0cb7641e69bec96e0", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_hook/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_hook/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0c4931f013923286bfbebcf960b7e40393eebe4f174bf06dcac98844b38e69f6", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_hook/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bdca29ef497b21e9bfbb51f911df8c1cc13a3010965f5349d4cc358b1688aff1", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project/tasks/main.yml", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "1f903f10210e52835f15d7e619c0bf59f5ab95ad2b220e11838c2c7b50db00d6", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_members", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_members/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_members/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a2bb1c92d8298e5cdeb1a98cec0fbff5b7da4d8897d34516e4b72e8f759f108e", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_members/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_members/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "420519b3c8476fc0a98089d738434f8f1909ae1e5ed2602b9bde86470455a12a", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_members/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "60945d49535300be8e42108658dba31fcd5d665fc40d6f186798e7e0682320ae", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_variable", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_variable/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_variable/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "92e7e6fa801a0ab0a48db25e740d0a227211ac0b982c68d68e9ba83fac6b307f", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_project_variable/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_runner", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_runner/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_runner/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2c7e14b41d8d4ada634180cd2ce6e5ac801c719b1742762fa79cc9287215e020", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_runner/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_runner/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c4b0f7fb12d1c5a1dd43495ae9bb60911ef077c599d667af2406b4bfe305e4cc", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_runner/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_user", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/gitlab_user/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_user/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a67c504937b390cfd08e7428deeacf53b2fbcfbada9cc952723bc36ab4451a27", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_user/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_user/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f2ca94e7f32306a0640dc76882d5ea6cb6517fb28fa582ab5160591ef8150dd", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_user/tasks/sshkey.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2569ea4605ff02865a17ea59758a3f1326b4675915fccaedc504535cef986965", - "format": 1 - }, - { - "name": "tests/integration/targets/gitlab_user/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a", - "format": 1 - }, - { - "name": "tests/integration/targets/hg", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hg/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hg/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7", - "format": 1 - }, - { - "name": "tests/integration/targets/hg/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hg/tasks/install.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "084fe24287e02f0091df7cb1e00a4ab15a8ef021fc39e3e52dada5a990815991", - "format": 1 - }, - { - "name": "tests/integration/targets/hg/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9bcb9f54c987dff4eb2cf6334ab977cc8c6e0d93083ed06d1e9e684375a95ee5", - "format": 1 - }, - { - "name": "tests/integration/targets/hg/tasks/run-tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3c1e0433e03ca5fcd553ec8009f5df158e147853ca962f83428a0c1000e4d087", - "format": 1 - }, - { - "name": "tests/integration/targets/hg/tasks/uninstall.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b94a8d10092fc2781587cbd2ae5bffa1909832af5780852b03be4124fd393baf", - "format": 1 - }, - { - "name": "tests/integration/targets/hg/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7a0d5b9fbb9f7be73ffd1f589ddf8b702d470b7539576afc31855cad91860a08", - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f4fa733d9f1ccf34b15ecf01294b5b3195d76b8b78b5f0414580160e2109b02c", - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9aa01df43b8368ff8ff2f27edfd594993bdcdacf76eb59889807346b97422b0e", - 
"format": 1 - }, - { - "name": "tests/integration/targets/homebrew_cask", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew_cask/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew_cask/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1ab7a687bd7881466d582bfb1345cef7b657ca8fc5972c048211de3bcfb4880f", - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew_cask/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew_cask/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "29afac27fd3f252b502b4b9ce4aba286fd2c516e87a9c18ea8ba9d5995d4349f", - "format": 1 - }, - { - "name": "tests/integration/targets/homebrew_cask/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9aa01df43b8368ff8ff2f27edfd594993bdcdacf76eb59889807346b97422b0e", - "format": 1 - }, - { - "name": "tests/integration/targets/homectl", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/homectl/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/homectl/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "23ad96b79be8312061f1981311a122382fe4703760d3e27c26bef941fa3b64d9", - "format": 1 - }, - { - "name": "tests/integration/targets/homectl/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ebd3c7b8ac2c2ea2e54b29a558aaa75fee29d69bdc957e1c8da6982527017cfc", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_ecs_instance", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_ecs_instance/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_ecs_instance/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "de5cf8bd1d385eefa5fe8257ffdb286d58332a7f1a2a1604536c8721d5536a4e", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_ecs_instance/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_evs_disk", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_evs_disk/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_evs_disk/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a09fbc9b56804ffc0541a1b7fe76b68968335bfd7abc8225881d3bbd726b291d", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_evs_disk/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_network_vpc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_network_vpc/tasks", - "ftype": "dir", - "chksum_type": null, - 
"chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_network_vpc/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1509b38a85cee8bdfe771c6d76fe47f4f137a6c9222a19eeeacf1371a5e26467", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_network_vpc/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_smn_topic", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_smn_topic/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_smn_topic/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "54bf4ac5512282d30aaa5850e3fcb74659f5a692a71c0172969d45451bb10177", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_smn_topic/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_eip", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_eip/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_eip/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b5699c9dd705dad64044145400ad39335bd81b444095dd999fe7f58c6339e7ec", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_eip/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_peering_connect", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_peering_connect/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e261ffb4bbc2eadceb1ea8da7920b6f76a7961aec22eae08c139eb165cce1e96", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_peering_connect/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_port", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_port/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_port/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "75820ad044e6554a2cc7f5fee0acf721cd4f9d9134741ed228512190ad27b760", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_port/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_private_ip", - "ftype": "dir", - "chksum_type": null, - 
"chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_private_ip/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "92f41d7c43c5ca3572571fe48a3863d87cfcd3b816a698628ebcec600449b773", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_private_ip/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_route", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_route/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_route/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5c9bd96aa2b256b53990d2dc16cdc80e1ce4481c2ca5f47937877c716b514497", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_route/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_security_group", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_security_group/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_security_group/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3262d580c2976d2486690d580e450acdbfbaa461c31ca8910e8e236de3c41db3", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_security_group/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_security_group_rule", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_security_group_rule/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cbf8477e586f8bb57278d7d7e64f132d814916a864f50b3e8dd229125ddc8ede", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_security_group_rule/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_subnet", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_subnet/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_subnet/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd1ea2b20a87c7d012cd2c523c12c3cbbc59e85ff6a8cad5b17ad1f0069e28a5", - "format": 1 - }, - { - "name": "tests/integration/targets/hwc_vpc_subnet/aliases", - "ftype": "file", - "chksum_type": 
"sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/ilo_redfish_config", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ilo_redfish_config/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ilo_redfish_config/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "537fd2fdc275af6ba5b5aa64dafa7adf8679efbcf8f1d6c4563d8e5b4b8c8c0c", - "format": 1 - }, - { - "name": "tests/integration/targets/ilo_redfish_config/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/ilo_redfish_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ilo_redfish_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ilo_redfish_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a851c988a2b856c560749ea717a9f38fb2501392ca984038054ee4114a7ae437", - "format": 1 - }, - { - "name": "tests/integration/targets/ilo_redfish_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/influxdb_user", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/influxdb_user/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/influxdb_user/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "158ad00ea1445a97e1c5b6b0e12365134587a090b899a5b01bd76beec2dd8e22", - "format": 1 - }, - { - "name": "tests/integration/targets/influxdb_user/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/influxdb_user/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9e631aa1aca12eb260bbb34bd7f771b07a965d504095be4569a148f400ec2328", - "format": 1 - }, - { - "name": "tests/integration/targets/influxdb_user/tasks/tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a185251f0cf4666d4fe3e2ae59c3da7366fb995b786274d4408f65f149c85272", - "format": 1 - }, - { - "name": "tests/integration/targets/influxdb_user/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5dc95ef850dbe4e8cdbdcf90c74b390fcf1ad12cf3b15de1b037c5a06194fb28", - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": 
null, - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/tasks/tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/tasks/tests/00-basic.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d02f0c61deaa8879f18cb4b7b077be3ad6979356969d073b368e77a51022b5d6", - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/tasks/tests/01-value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "976c5d3b0a55c5801f8bf52065dcbfb09e2a64cae0e5bde38bee69056aaccdf2", - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/tasks/tests/02-values.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "acb18d72ee309d2fa719977a7b1580c7a6e2095dbf666af5db99d8a6dadeb425", - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/tasks/tests/03-encoding.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1da025b5496ab1abf9bdc41b92f6ee43aff0700ab392733daa4bade024c9a668", - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "faf98414a89b98ff97eaea421694f7fe9e936d33dcc72812b7b8ea36a1c04472", - "format": 1 - }, - { - "name": "tests/integration/targets/ini_file/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1", - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file/files/interfaces_ff", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a1e8604733a58feb25d85d2693040ddfce90486d641ab06fa289d45a27ce090d", - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file/files/interfaces_ff_3841", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "12b375a22f718f2244edbc059c736d141f7944d989fb709159578cc65891506e", - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8f57f67f3df8bafd2118721ad3a5ecc85c28ba9c69abe39a955b7de8f6525b20", - "format": 1 - }, - { - "name": "tests/integration/targets/interfaces_file/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1", - "format": 1 - }, - { - "name": "tests/integration/targets/ipify_facts", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ipify_facts/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/ipify_facts/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "222d6440e89b804afdb94b10bf7407c0aaddcb63d68658efdffbf48e868723ad", - "format": 1 - }, - { - "name": "tests/integration/targets/ipify_facts/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ipify_facts/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ebe010ed6441ca00f76d559f7645469f089a40f79cdc3fb0af3511dd0e03222c", - "format": 1 - }, - { - "name": "tests/integration/targets/ipify_facts/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1", - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/tasks/tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/tasks/tests/00-basic.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2cfe87661c422f8bec04b806d6e906353dc259d55e6a2b6a18c278a0d3e0d90d", - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/tasks/tests/01-tables.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cedfd25751938db5690276e1832ab4f0f4eb88d92a7454fb9e0d9d2dda11b3d8", - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f5f17a3117bb19cf75aa1969b637271124d44e50a776c737e5b718fea131738", - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3688529100e39015e96632ce94deca3f35fa32c6dc96e4cecee76484b0e7ea2a", - "format": 1 - }, - { - "name": "tests/integration/targets/iptables_state/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "519a3d522001e03d5b1a6676ae34217f74d21b0c07c64ed8d5d0a7ef5226d8ca", - "format": 1 - }, - { - "name": "tests/integration/targets/ipwcli_dns", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ipwcli_dns/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ipwcli_dns/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc9f592affb4dffd770f50930869e51394e7907cf5352f17a48491153cedbbf0", - "format": 1 - }, - { - "name": "tests/integration/targets/ipwcli_dns/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "536a08c5b55f0ea5d5b58de3d90806e1341df3c8c1c568dc2494be42afb1e73f", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create", - "ftype": "dir", - 
"chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/files/test_dir", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/files/test_dir/test2.cfg", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52e199689c2481bec1de30c69fe948823b576f222a75360cc7ef7da65578262a", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/files/test1.cfg", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "52e199689c2481bec1de30c69fe948823b576f222a75360cc7ef7da65578262a", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fa6468f53938b73f79347dc9b29706a1321ac1247e813925e0c95a05d9ae1c5b", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "df4f88a131096ff24fdf30a18c7d6f11d5cd020d2064e94fe8da9b1e6b1c9068", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_create/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f4de83e34024a07b9684aab1f4a6217aa205a7737493fafe42f6ff40141eec04", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/files/test.iso", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c5a1719603516790e3a007e17f28e28fca7eb5ec8d6205692e906d4239fe068", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb510400875f8efd1df6eb1c488c7f645f120f0e618e2169920c33492102b429", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/tasks/7zip.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "975b4509c955dc0f879211315f91331e092fa2a713dcfdcf2256454d1dfbbaac", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "61349cf9d51fd68d556001198120758755862b0758e57fc4201594731c7711ec", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/tasks/prepare.yml", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "e886917ac7434f5740c2cc2b61107b7c40b32934959ba594ef47549629550034", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/tasks/tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3bce6efd1142cf1138d0a8c7ab3e1ef038e15471dfa9be2738451c5a0e50e4cd", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars/Alpine.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a7cb2dcfc68f900b0a830283aa6117f5390a63f7361f0de0aac5d2bb5b5e96b", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars/Archlinux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a7cb2dcfc68f900b0a830283aa6117f5390a63f7361f0de0aac5d2bb5b5e96b", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "26272cf027dc30fcd95df70e0b3aa3656228b0a3285e48aae289bb649dc4dc23", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars/FreeBSD.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a7cb2dcfc68f900b0a830283aa6117f5390a63f7361f0de0aac5d2bb5b5e96b", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars/RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b294d90c2772adc4f7a238290888129b12021d8b6d23b97589f81e72befac2a1", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars/Suse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff54674472190ab490cccbe6e71e3226bc5640b41776bc8453592e587da5cd13", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars/Ubuntu.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "26272cf027dc30fcd95df70e0b3aa3656228b0a3285e48aae289bb649dc4dc23", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/vars/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/iso_extract/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8e9c8f4f328cd81650a403cdfbe34be2f0460da90e460a962943ed635633bc05", - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c8fef72781f995752ac2345d27dfebb882a0f4bfa096d9a3308c8b0f7e7381ec", - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/files/setupSSLServer.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e073d13bc2dbee83b1b6265d4ac781fce844c668b8249948924481629f9853d1", - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/files/testpkcs.p12", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"194ae4f77eeaf175ebefa471eced93551d2b9f0a0018e9bfd0a24cb0acc380da", - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "efaf82279408067868c1523dc741ac54e4e178cdea5c72e568323b9fef478a6c", - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef674f384bef0618944aaac1aadd8c8974a87970b14dcfa0c4cd61188eb60cd7", - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/tasks/state_change.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "662609b6a22f94aa6aebe1673f99b3ec0183bbed70cd5173ab711d0d866d040f", - "format": 1 - }, - { - "name": "tests/integration/targets/java_cert/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f15869163e4716570ba32825c2afa82d1a0553a245b6c483d0e852ec8a5ee826", - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8d889797b46d3474b92f0f707eb37ded692bb9a10c5ae96d0f6eafc999917a8", - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "efaf82279408067868c1523dc741ac54e4e178cdea5c72e568323b9fef478a6c", - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a20ff593bac026b2e925c07b9b9bcfb7e3da90a575b1940fcf185789d3683d3c", - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/tasks/prepare.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d39bc52885014bc3f14a04fd39fc15feaa1950cc0d9b1d5bbd3982b65a05ea03", - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/tasks/tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f8e5cb58596bb5a36ca750744922904b610a6bf6ce2a9f1508b8022da686a0e", - "format": 1 - }, - { - "name": "tests/integration/targets/java_keystore/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f15869163e4716570ba32825c2afa82d1a0553a245b6c483d0e852ec8a5ee826", - "format": 1 - }, - { - "name": "tests/integration/targets/jboss", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/jboss/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/jboss/meta/main.yml", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "653439bf464124f10cbf0e09beba4e37fdbb99fc3ac82ffbdb48d8c5a6f23874", - "format": 1 - }, - { - "name": "tests/integration/targets/jboss/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/jboss/tasks/jboss.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8873ae18f2441425cbe73266fcbfe9ff8580a81e5e3c98245f736b92c7e3b79e", - "format": 1 - }, - { - "name": "tests/integration/targets/jboss/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f186d67ed2a7b23c533cdae8cea758cb8ffa2dbd5858b64ea4be861d1dfd922a", - "format": 1 - }, - { - "name": "tests/integration/targets/jboss/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4eb67a6465d730c165121c76bcb6825d72c1894f6ba3a3c797d6a11b8b4e687c", - "format": 1 - }, - { - "name": "tests/integration/targets/jira", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/jira/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/jira/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "461146e6c4e20c2980f2a774e26f8d7aaaf29c36f696dec6c4ab9de528465954", - "format": 1 - }, - { - "name": "tests/integration/targets/jira/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/jira/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d7f9490ec266ebf584e612c75f7ed8ea51ec1393428f1fa8f8bffe9e909e3755", - "format": 1 - }, - { - "name": "tests/integration/targets/jira/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44808479bfd6a34a9ae1631f02d8805a5fbdc06a9a1ed80146de1bf1d2607cac", - "format": 1 - }, - { - "name": "tests/integration/targets/kernel_blacklist", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/kernel_blacklist/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/kernel_blacklist/files/blacklist", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ee4d3ecffc31f6d6f12f30e3934e36ab7f523d920e428b31c23d7c8509e2f2c7", - "format": 1 - }, - { - "name": "tests/integration/targets/kernel_blacklist/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/kernel_blacklist/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/kernel_blacklist/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/kernel_blacklist/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00ecb0d28591430779770a2736ba6a9f93b636ec7acb606abf15cd8c99ebc690", - "format": 1 - }, - { - "name": "tests/integration/targets/kernel_blacklist/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_client", - "ftype": "dir", - "chksum_type": 
null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_client/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_client/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8783e53bdddaad295467048e483edecd1176bb2127b98c09ba8b45a309b13368", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_client/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_client/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4f761bb393b96d75911e13b573f5cb4a7c131df8f4737c734a14766bd59c631e", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_client/README.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "290c1e7d6698f4a13ddb9112a9e01e68458bb06e5ce25e8cb892a9720bf96b64", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_client/docker-compose.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "875f6faa01cccd6194195ad2b11ee47cc83592408856ff402b7e4f84fb661a4b", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_identity_provider", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_identity_provider/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_identity_provider/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "479980d4962c6bb999c685961ec9cfb599e11c63bd04d11cb68086e303a9f73e", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_identity_provider/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_identity_provider/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1eccd2eefe0879171c7c4d21953f58c2d42254373390b99fc1ac633e02d56c49", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_identity_provider/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_role", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_role/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_role/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "80f441cdc22c348f50e8ffea42b0aafe895de50b767d25a99832fe11d809a0b0", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_role/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_role/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a95051d0d8259f0ae32235ed669d7bb89db37e49a0bd3163baac7ebacaa38f29", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_role/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": 
"tests/integration/targets/keycloak_user_federation", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_user_federation/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_user_federation/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d48160447515c3c470044be9eb97c2447e5dfe308301065344c17edfcf89eada", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_user_federation/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_user_federation/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "684833062e98a096c4980af186b03f7ed9991c5e03e3d759f7434575b1d7597a", - "format": 1 - }, - { - "name": "tests/integration/targets/keycloak_user_federation/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/files/ansible_test_service.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d10b0fcb1c6ec6b0e5250821e1ee5a2782e34cad5327b37660eb457400b455bb", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/tests/test_reload.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b20f13d5ee7a2aa15015a8155b3529d0b1a2cebb1d49dd5d7465bb37874c4188", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/tests/test_restart.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0c1d201bbda5261054ea2665e5d2e6f31a61c07529a839a14958ad71d58a59d2", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/tests/test_runatload.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8299cde6cf41dfe095828bf838e72b4f96743f5fd6d4a06690f99b350f29fc61", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/tests/test_start_stop.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "523ce654be36680e551275021267b27a14956266b06feaad9755f2b0609971c9", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/tests/test_unknown.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd41fbcd89bce7da78fc8f6a1852e489a529c82531d06b6fefcc62ec58b55db7", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/tests/test_unload.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "98aff4473c41233d041f404c30be7db975f6b552acb22a9f2cfdee5c139f199e", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "785e14037edd887fc59a7cb2bf4d384dc6b67d54eb2a3683a133056b6efa3bd1", - 
"format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "df6d13c8c4fa9d93a8d958e577c8e92b7789ea31806835c15866047bddf1f82b", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/teardown.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d982202029c12820d4d29bd3d35d5568f7370d0d8fe292e81e2779da393c8af9", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13c2574fda72b4a025e49f7eb748cd9242765bd62b9572d35da396a74705e05e", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/templates/launchd.test.service.plist.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3b2aa7254c3c493666745f80868f5eed3ea63089ded01c1d45a97a84338c8585", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ab2ed9efd43886160eb67c8a7d31b949c203616466c2a6762604ff2074292406", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7568f2a55857df4d6b83e7be5dd251131e4e092f6be4e74e479a85280ff9a1ff", - "format": 1 - }, - { - "name": "tests/integration/targets/launchd/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "75ad1ca2f920de9b6df6fecc06134026eb2f9e59cd559c3cdb214e88c93f03d1", - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d5d4da7d75a6e80bc78241b575d83e252dcbe32f9fd3770e05d808d896dd6f31", - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search/tasks/tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search/tasks/tests/basic.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6b3ddab041bd891f70f2d3d8c83c7480aab5fc5654165f2e3cb4a96bb2e1a5e9", - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aa78306d233b0651c0e88c40ebc4974b38f6ff3aec34f344413a4db4ee3e785b", - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search/tasks/run-test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/ldap_search/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"d8f4c57a73a1bb071fa3c931f0cddbb6b8dd7ce48e4e1afcfbab2a6d1e915482", - "format": 1 - }, - { - "name": "tests/integration/targets/listen_ports_facts", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/listen_ports_facts/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/listen_ports_facts/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837", - "format": 1 - }, - { - "name": "tests/integration/targets/listen_ports_facts/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/listen_ports_facts/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8c8b38f02715e310477cd9002218a7e69f2ed4ce3061f6312386d799fa0eb9d", - "format": 1 - }, - { - "name": "tests/integration/targets/listen_ports_facts/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "59fc7c3959a7cbc9ff73abdf90f8fa853c52ff56602f548faacdda0b5bc3d485", - "format": 1 - }, - { - "name": "tests/integration/targets/locale_gen", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/locale_gen/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/locale_gen/tasks/locale_gen.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ee978f272037425567d6e2051c8ec5a0f263d25a81bfb4ced417aeef52a4147", - "format": 1 - }, - { - "name": "tests/integration/targets/locale_gen/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ee234de7e912e0b7b6aa49643af769675600152350ae20862abe8d38d62f5976", - "format": 1 - }, - { - "name": "tests/integration/targets/locale_gen/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c6fe5ba544a3a858f7921ead134971c6b094f597106f6c621ea21ab4608ba5f0", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_cartesian", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_cartesian/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_cartesian/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bb854c1b495c50987895fd8a267dffe50c4521cf73d6018df77d511c915930a6", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_cartesian/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a448d01a35a1c112f7d370e8a29da2e5d960c967c9c80f2322450041aca81da7", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns", - "ftype": "dir", - 
"chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/galaxy.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69c0cb85c493f4a56758eb814a9c36104cf36c449a0e54d1a6b4b72bbda01ec1", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules/collection_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "025818f18fcae5c9f78d778ae6e246ecffed6d56a886ffbc145cb66d54e9951e", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8692610cecf8948d2a62fcb3b22f56c0fdee4e2d9d8e69a8cd3571a01cb724dc", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/README.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules/collection_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules/collection_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/galaxy.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c3ae898ca973c0b55357648796561a08a11b914469d9b8bab4488bde9448b6f", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/library", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/library/local_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "853a4708a3d35eec2ffe537982f938eb947da2faf1b405a4690b5b6a2ed5dc7c", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "737ebeb3884ca0395d497b4d357dfff97dcd1463a494b2fbb2e8dfaaf1b4251a", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_collection_version/runme.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4f807dd9c15cbc744a767651439de60fb743e716433196f25db62259bee61f06", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_dependent", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_dependent/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_dependent/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "807e9e08b8a7aa88d7062411b826bc8e6857f4b44a3792336d57cb0c793cb880", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_dependent/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"695e4b8fa8a5286051c058d1844092e58bc1550040a7c82112912d2d2b870ac1", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "60c4ec43079a8d934d7f8c21cf902cbddf88d6d43432f45acf00a06804209ff5", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4ecde298d2c5c606796c2d9d6786ac9dc0c81f99f59a501b45fcd037ea36e13d", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/tasks/tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a7e80348f1c6c2dde46f749d14d6098983cd5586973b52fddb99c4ff0494cc5b", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8415b9a44e521d868b65251bb6810a29a0d3cd513751441e932fd84bf3b461b", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/dependencies.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f30e183aa0eb77ea5830ce9cc7166cc93b874663a4247caa67bff8144641490c", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ef51b03fc4c317f790f7717b8134a950749cef70f871d5efe3ab2762e0ed15f", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2871df47541b86d0adb6a4444eb01df5ab1124c1dae75c3ec7d8d0513ea093ac", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_flattened", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_flattened/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_flattened/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1374dbaac0fbbb8c550643883dc61e1346375901fe96cf98b1366d2301261384", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_flattened/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_lmdb_kv", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_lmdb_kv/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "991cb87e99f0e7f52f7416a83c33bb6eea974c5d884023b0b57c4f99bc70a37b", - "format": 1 - 
}, - { - "name": "tests/integration/targets/lookup_lmdb_kv/dependencies.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2b937926bdd526ef463973b166cde34c884fa779bd898b6f63e901ed7c0f62d5", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_lmdb_kv/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d63607a90c412835ad16889e23ffb58bf589878980a7ff2ede8fe06dde514d73", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_lmdb_kv/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a2a322dfe5df22a6407fcf5b91572ec5b232d97d8b4ba1a6af45bc4fd7baaa60", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_lmdb_kv/test_db.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f340fcdcb004de2205af050b6077e89bb71f1ce08b972c4deafa7e45bc3809b2", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b3735d01e792324eb746207de73f90bd1bd83ee3aeda65e6d733d270f86c5ffc", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/tasks/package.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4bdb24595075fcad9c9f2340e03bbcb9906cc12ef1429178df485b48623ac4a", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/tasks/tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46aa3ae28612cfc2d26e4e562b960ffc37fb7f8ae726aa3d345c93b4d065374e", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/templates/input", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9b5f9a20d102714792f1cc5d2eb6b87ae0379b2ce632d3ea1bd983b13a2d819f", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "00b89accfb1e13ae32acd82783723e64793a7d4870461b042fecdf80d9dfab0c", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/vars/Alpine.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/vars/Archlinux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e", - "format": 1 - }, - { - "name": 
"tests/integration/targets/lookup_passwordstore/vars/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/vars/Fedora.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f341735e70e95d1e7b23ea160010b148bef2e38f2e58daf9fbf1131318763fc6", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/vars/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "29b468e6a619569af108d581b1fc0099511cea7bfeacd2e771817abfcc17db83", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_passwordstore/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10316af7c1503e4b04d885baab5bc61ecf51c4b073a4c5b4078b8bf9772a1535", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_pet", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_pet/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_pet/dependencies.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6e867200e5243183bfc0d00303ed73c82179449721c3b536cc28cdbc451a51b0", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_pet/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d63607a90c412835ad16889e23ffb58bf589878980a7ff2ede8fe06dde514d73", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_pet/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a6ebdf42ae20e632330b2159cbaa2b0d6ec2731dd8dd62cf873ee18f83b119d0", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_string", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_string/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_string/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "de720cc8d59a01261c316a93c01f6a2906ab6d8e042d2487350b7c748aa2ff8a", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_string/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "54f5c7b625741fe234715e821492938834905f6dde23d86aa922af79c09d966f", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_words", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_words/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_words/dependencies.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2950a0d6312c16a08102c19a68801298007bdc538f84f36ce599b20c76eacb84", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_words/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d63607a90c412835ad16889e23ffb58bf589878980a7ff2ede8fe06dde514d73", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_random_words/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "32511a0bfba3767e12ad08903c8a90ed51c9af627b3b020fba5e626bfd609f34", - "format": 1 - }, - { - "name": "tests/integration/targets/lvg", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7", - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "73c27a48b9413acda1ebeb7276920d77ed66258741e3f7c8b95424dda21bb5c7", - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aa736d1ebed4a320d02af6397a292c44f243acb4738dba4798ff8ea8920b4de7", - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/tasks/teardown.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f9cd523d7c8218d7b4f296ec486d2d933d4c3e9051d4b25e6f1251bf70462e71", - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/tasks/test_grow_reduce.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "564cf97c44900d4e3c5a3148a900b4577927179cdf8732d6958cea9862c88ac6", - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/tasks/test_indempotency.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "acda7d6e98f33b902a2f78d1389d57a6b268b0f1dd04b68e615603c358a6bab6", - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/tasks/test_pvresize.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10f5e2906836542bd73b121fcf6d93b69395e142f103c1abf754af5d5fcac44f", - "format": 1 - }, - { - "name": "tests/integration/targets/lvg/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d74111e18f3d38965d19bda2998ddc9bfe25a34dbf5c26c2700c70c0f645453", - "format": 1 - }, - { - "name": "tests/integration/targets/mail", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mail/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mail/files/smtpserver.crt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a55d9996272afcf7ba2064e9d7ca4c77617a405966cab5157da80566482431d0", - "format": 1 - }, - { - "name": "tests/integration/targets/mail/files/smtpserver.key", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"0895aca337049f7faddb275b6e8a81ae0e8285fc9388f2d96e9b970a0c31541e", - "format": 1 - }, - { - "name": "tests/integration/targets/mail/files/smtpserver.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "07ce913cff3a6a186774bb3ac87b9d195f650141abdbc26e7bcb52777392fc9b", - "format": 1 - }, - { - "name": "tests/integration/targets/mail/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mail/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/mail/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mail/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27c7a92b984df15bd6b134295d99c0c1615823f91bfa3b3106cf9d5aa9033aa5", - "format": 1 - }, - { - "name": "tests/integration/targets/mail/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b", - "format": 1 - }, - { - "name": "tests/integration/targets/mas", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mas/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mas/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1133fa36703e51b978dee3be6f6e7e291ea0e744208c39ae9088b5ddaac49d6b", - "format": 1 - }, - { - "name": "tests/integration/targets/mas/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "95b0a7b5f17cd3772fc4b68faeb81e6cb6b459013f4b5c91a2812d06afc31e2f", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_dns_reload", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_dns_reload/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_dns_reload/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_dns_reload/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_dns_reload/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "04d09b3332b47f6b7c88e028e61eb609d9619f2348f72e06822082cd472113df", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_dns_reload/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_memstore_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_memstore_info/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_memstore_info/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b", - 
"format": 1 - }, - { - "name": "tests/integration/targets/memset_memstore_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_memstore_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c6cc39e44674cdd97f45755136cc976294e1df35be4128970367159ceaaee3c8", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_memstore_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_server_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_server_info/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_server_info/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_server_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_server_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c80257da29e1d730786a3377fd78936022c5e532bca726dc8c4110c0eb935428", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_server_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e6779c3721824e477ee2b1075957ea75de4985e790631967c6e404bf963a7c3", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4c81bfc3449bf48a59e3ee5bfb16dce121eee7001770bfe843233684a4a09d82", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_domain", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_domain/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/memset_zone_domain/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_domain/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_domain/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bf5ffdd7f49c858218d24c2b92b4b0cad7c308e2b91c8aa7f9aa26c42728b525", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_domain/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_domain/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2a37de1a0afb6552501549e1f955115730ca1ed0e60cf18f07b89704b749ae96", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_domain/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_record", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_record/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_record/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_record/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_record/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb0b5904368bb749f72ab0dccb6452fd5b92903763c9c7b02c654ea2a254d9d7", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_record/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_record/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1dab77163c3e164596e15c1eb1658f88196dcc1e92f38f067c652d3c27756025", - "format": 1 - }, - { - "name": "tests/integration/targets/memset_zone_record/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/library", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/library/mdepfail.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ffde27e3ef28041d0cf5826ff7395267a37a1d2bbe648802088839649b2d15b8", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/library/msimple.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "db49c81d8bf650fe23c5cece2017d1faf94809ee8a39854f16cc5c0e87b87701", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/library/msimpleda.py", - "ftype": "file", 
- "chksum_type": "sha256", - "chksum_sha256": "3f96a70bdcaa67a0aa9c1755ae8f0b28d2fbef24414d265b929d6086c6e4e4a1", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/library/mstate.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "010b5c1b2c0180c9acfb7779f0fc4abc21128ac5e3598bda62415ae5761cd687", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f192756d5450841cb56dd181ecd4b640ce5835fb4918a4829a94d2f9625e07ff", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/tasks/mdepfail.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0aedede136fa52bdd76262e978f30a185223c8291df5b36f301d7008c1fa18fb", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/tasks/msimple.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "340b40190549cc359558857ec33b68842c2f00b74598b47e2a800450f08783fe", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/tasks/msimpleda.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "95510ddd0bea47f218c8865c589ea83d02213f79e78f99bfb1d948b580cf7abc", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/tasks/mstate.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1bff596c9a017abe3f7ef1279ca5d0d602d797c27230a96272d00ec6400330a0", - "format": 1 - }, - { - "name": "tests/integration/targets/module_helper/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1", - "format": 1 - }, - { - "name": "tests/integration/targets/monit", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/monit/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/monit/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8c965891bd86e77ca17e9eb381554259b5be20540a005a4024bd405a7b398ec1", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/monit/files/httpd_echo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c7a5c16f2ae6c611edd538f3d0549f89db33015ee5e7cb9193b60d0b39540c7", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/monit/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/monit/tasks/check_state.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e08af6eb7cf52766f08ed7e4cc9ac32c9974eae8e8c47203e9fbf89337826377", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "b71412a837cef2ebcc536d77efe1d5146ef33526791fd54c2868eb017549b541", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77856e3d9b710a347000a4df4d7fae6622f56bbcfe71a200b114f643bd2bf594", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/tasks/test_errors.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7fbd04ef7bf73505329dd0340d28330b1dd8f80564e649a3d217844359f9d3c4", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/tasks/test_reload_present.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ce236af6a0d981a66b001afbfcd7d45d7544a4397739ed45b256e9c5fc94da81", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/tasks/test_state.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cf46c8a28d3089d91b851add3f68540830af7281cd848b64fb1e21677cdcb8b3", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/monit/templates/monitrc.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9de9c9c884d2080f8ec61a09fee4e1b493cd6f76f669bc866daaa1637c3b16c8", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/monit/vars/Alpine.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/vars/Archlinux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/vars/CentOS-6.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0116067b4db69e9e5ff4178810fb268021a7d89787f3fe692863d78a4977362c", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/vars/RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/vars/Suse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/vars/defaults.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fd17ec0f116ab194e2dbd313fca5de30362d09be9b48ed4d87bdaa7df5ed4f7a", - "format": 1 - }, - { - "name": "tests/integration/targets/monit/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f50efd00db8c2d6177833ea2ff8b473fc4656571203581300af933f6f30f9635", - "format": 1 - }, - { - "name": "tests/integration/targets/mqtt", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mqtt/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mqtt/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bcefe5429771d37b3d344153395093b33a0ecd10d8fd9968af8ee0d63684899b", - "format": 1 - }, - { - "name": 
"tests/integration/targets/mqtt/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mqtt/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a1807cb88a11eec71eeb260bc7a744c740f81256fe80f6a37a2c55de020fcb79", - "format": 1 - }, - { - "name": "tests/integration/targets/mqtt/tasks/ubuntu.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a581edcff230b2c1df3d2fdb921ed8c6a511ec27de41be2b02078e092d314688", - "format": 1 - }, - { - "name": "tests/integration/targets/mqtt/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9ff7f409a177587a8bb388e3940800f5817072d7bb6ed5288c9c4445536be484", - "format": 1 - }, - { - "name": "tests/integration/targets/mssql_script", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mssql_script/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mssql_script/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a69057ec4212ddd879447db0cb98c105b7f138c89a13f40c186dd8eaf87f66a3", - "format": 1 - }, - { - "name": "tests/integration/targets/mssql_script/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/mssql_script/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c04d6c95c17d90a7ba3c4c161f90be9faa916e8acbb77eeea69f54e14f695da8", - "format": 1 - }, - { - "name": "tests/integration/targets/mssql_script/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9e395016992b8dfa6def8cd2338465b68d0e735397c07899ca77bf5c0d493467", - "format": 1 - }, - { - "name": "tests/integration/targets/nomad", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/nomad/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/nomad/files/job.hcl", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "37e149184dfb3c56a86af62362de71f887b6e3d75b9bb8ffab07bbc4dd8aa2af", - "format": 1 - }, - { - "name": "tests/integration/targets/nomad/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/nomad/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0d7dd9e426571c0334ab74bf3c78984772b5478d423fd107c01c504bda6ddb22", - "format": 1 - }, - { - "name": "tests/integration/targets/nomad/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/nomad/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8c12e85ed9418ebece20dbbe3adde959d8e0711da29277df9fb082cd8701f92b", - "format": 1 - }, - { - "name": "tests/integration/targets/nomad/tasks/nomad_job.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3c067f9a1599dcc8ae567507fd85091739acd42c409712cc7ff80261f8778a5", - "format": 1 - }, - { - "name": "tests/integration/targets/nomad/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "11923455942cc5d6bf1e89cfec52d38ce22ed832752a317d9906562d6986b98b", - "format": 1 - }, - { - "name": "tests/integration/targets/npm", - "ftype": 
"dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/npm/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/npm/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8fc45c0fa92f9e855eaef341b5d10d9ee2630f93bc4bd041a943aa8c1169b3d", - "format": 1 - }, - { - "name": "tests/integration/targets/npm/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/npm/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2f69f96675c5d909074d14b469fd11b5d0799710cc39d78b0700726753b49421", - "format": 1 - }, - { - "name": "tests/integration/targets/npm/tasks/no_bin_links.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5c6cc3371774a8379e1e2a2192af509387a4877d18b07a369ebd433fc1044b79", - "format": 1 - }, - { - "name": "tests/integration/targets/npm/tasks/run.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f30f4b64bdb651d01b7cc458d7269c00d968c236ad8d4f084c6b4cfad7ee4913", - "format": 1 - }, - { - "name": "tests/integration/targets/npm/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "befd3af1f502a374000746f446419951f16814bf9f2ff5081ed4b6e4dfb1c631", - "format": 1 - }, - { - "name": "tests/integration/targets/npm/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "05eba1a52eb4261b982cbcff041f15300dff20612b7dbf0cfde3d45e8bd5084c", - "format": 1 - }, - { - "name": "tests/integration/targets/npm/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6513e6ddce12ed2e95878f953386ea898ad88bec8085158c8a422d78a03a4a5c", - "format": 1 - }, - { - "name": "tests/integration/targets/odbc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d9d5d091d0392e99760e265a1af2d157adb0214801f24e96ffc454993d206307", - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ebc8c5ac109203353a763337fe5239f104f0a997f4bb80144ce0af08da9fdc6a", - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/tasks/install_pyodbc.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c35a5a838c05ae876ac633ac75a308b3a94f5d34a5ba7e0bed62edbb547e59a0", - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3eb64aeb340726077141d3cf9f6700ed30c00b4ee0b8a7fb8f2e47e24aca80e5", - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/tasks/negative_tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f655299c1f15bd9f4cfb04cce676fd6aa13f0b0052c18a17c4fe0f2de52b18bf", - "format": 1 - }, - { - "name": 
"tests/integration/targets/odbc/tasks/no_pyodbc.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "67e03bf567e68aff49728425969ce1179bf82dc5e4ee75bccb88bcfb03e9de81", - "format": 1 - }, - { - "name": "tests/integration/targets/odbc/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7d3670fcf44107e0637dd3ba6cacc778244eadadc4cc233aaa6bbd8be133284b", - "format": 1 - }, - { - "name": "tests/integration/targets/one_host", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/files/testhost", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/files/testhost/tmp", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "18dc59bac231e749997e1a5243db69081f63b757fd43c37de017e20d58d010d6", - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3b981db212298d31645895377cac39ff68ed0d739270f19443250438ca66c47a", - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "59991d28adcde9f920cf1af942fb08bc9204c3ef711016c145d8cd1f35123e65", - "format": 1 - }, - { - "name": "tests/integration/targets/one_host/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "768ebe421dd904b7142848f4bd6c9defa722632d9d4fdddb4a489c8ed755b825", - "format": 1 - }, - { - "name": "tests/integration/targets/one_template", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_template/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_template/files/testhost", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_template/files/testhost/tmp", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0b303d1a0b4deab46c421512a15476493a895061e9d1f71f49ce03d78484e928", - "format": 1 - }, - { - "name": "tests/integration/targets/one_template/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/one_template/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3b981db212298d31645895377cac39ff68ed0d739270f19443250438ca66c47a", - "format": 1 - }, - { - "name": "tests/integration/targets/one_template/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 
1 - }, - { - "name": "tests/integration/targets/one_template/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "49b218551e1532d35c5e339addc1d3276bb9fbe167c441e98e1f32d9168afc5e", - "format": 1 - }, - { - "name": "tests/integration/targets/one_template/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "768ebe421dd904b7142848f4bd6c9defa722632d9d4fdddb4a489c8ed755b825", - "format": 1 - }, - { - "name": "tests/integration/targets/osx_defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/osx_defaults/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/osx_defaults/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f5199983dc3924b34cadcc6d04ea5197f9185191c3c4948330008b742cb59e20", - "format": 1 - }, - { - "name": "tests/integration/targets/osx_defaults/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "008395b49feeb25967b9261639f4bac0ce08f7e766019bd16bb5566c2f2035f7", - "format": 1 - }, - { - "name": "tests/integration/targets/pacman", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89260dedfc1ff941c8d1ef25efee5993bb384dc40b5734ff86e87b7241c33309", - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/tasks/basic.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6529f40764a35e02218e52cb762f6c7057e69ecdc960cdb754c8ea7129d89d68", - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e2d083300d0e31d69b5b02293a78c1d69007b17085ff3ba4552150ac11a01b27", - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/tasks/package_urls.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69b99006f78c48c7d26ed22e6bf9bf2e39b5588424a9202f264472f351ea6604", - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/tasks/remove_nosave.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fb1dc7897bfc4fc9f4e32ce5d056dedbfde8b0b5f59d3c3c8caa11f9c1794bea", - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/tasks/update_cache.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfdfd1b33760a2b4cc87012556b6cfa2bdc90d2ff3e6bdb4d27c2f9a6e087f70", - "format": 1 - }, - { - "name": "tests/integration/targets/pacman/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d502a4aa4e2eb90aa8ca5b3574abb191b74f24f1f4d15266457c616538367c26", - "format": 1 - }, - { - "name": "tests/integration/targets/pagerduty_user", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pagerduty_user/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pagerduty_user/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "5feee27e45ba9e467becb074e8789133b40238558c0d0cd4dcc85f50e96017ba", - "format": 1 - }, - { - "name": "tests/integration/targets/pagerduty_user/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pagerduty_user/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69321ab157a3edaa675ba5be65214e59d93f82f7db0cf5027c44e157576b7130", - "format": 1 - }, - { - "name": "tests/integration/targets/pagerduty_user/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/pam_limits", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pam_limits/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pam_limits/files/test_pam_limits.conf", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c5df9a5dd3edde9b71d8b086db25cae0293c425c687da179877ac5bc8b2ffb30", - "format": 1 - }, - { - "name": "tests/integration/targets/pam_limits/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pam_limits/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7191ea76b8b2d3375e64cd449bd3b51b75edad0dd77f227d65b299c1930c6ce0", - "format": 1 - }, - { - "name": "tests/integration/targets/pam_limits/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7366ec1af83f8212f093362620022f4a3e9d9de63b38c75466df933839cb1138", - "format": 1 - }, - { - "name": "tests/integration/targets/pamd", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pamd/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pamd/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5caf759f0397ffe1d1bb62d3f78bce6a86ea757caa6f2ec2197dab1bc3c5a6e8", - "format": 1 - }, - { - "name": "tests/integration/targets/pamd/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7366ec1af83f8212f093362620022f4a3e9d9de63b38c75466df933839cb1138", - "format": 1 - }, - { - "name": "tests/integration/targets/pids", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pids/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pids/files/obtainpid.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae4a9adefeed72dabcfc60eef1cfae673b7f0af5a25a4f0df685e431175e0b24", - "format": 1 - }, - { - "name": "tests/integration/targets/pids/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pids/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/pids/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pids/tasks/main.yml", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "e78297d4c4f795ed758263e06c79cec7f6957b896e3bab5e20d2766c31f6f03f", - "format": 1 - }, - { - "name": "tests/integration/targets/pids/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6", - "format": 1 - }, - { - "name": "tests/integration/targets/pipx", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pipx/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pipx/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0135fea669795f622d1fbff138755111ffe00c8baa17468ff3b8c7167aa56363", - "format": 1 - }, - { - "name": "tests/integration/targets/pipx/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ce7ec9d8af44ff68ad94f4fd44b2962f33fac14fb1e1fb7de9f6cb0ac0fa84f8", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/tasks/create-outofdate-pkg.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e0d942a40893b08957418783c1d522ee8e7c8e8b07b58bb4b90f011257251aa0", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/tasks/freebsd.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e46af856915b539ae659377d87226dc48a046a42997b7d64bd0c67a95e40d050", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/tasks/install_single_package.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "30f56b50eef27d80a39e074e6f5379a0e4841700964ee95e0e104f941e93dccf", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8eb5a3d187f0980a8e04e51d16c53f7bf7b84f139092ac0d5fb7d463000434d", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/tasks/setup-testjail.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f11c00003a95031754025dc7b0257c9b03714667d178951287cc3794895e96d0", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/templates/MANIFEST.json.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f7723b76e9aa5eec55d6911d8c10c742e99e38820ae0f54781373c458ef7379d", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "936e3c9e0925512166f7bf2e389848412789b1f2dbd9a6ec719e7b0a75ff11c5", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgng/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "80fcac1bf73988cab479c3db85a45b2bcf71260e19a6e691dca88213b7c83e42", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgutil", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/pkgutil/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/pkgutil/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fa1a76125fc665b4623a4d8720fb03e512cf38d631a2d8593641410b8cf22626", - "format": 1 - }, - { - "name": "tests/integration/targets/pkgutil/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "07e21b4fadf2da7cd49f0116e21341154d4ce15404a5defdf083b921516ee48e", - "format": 1 - }, - { - "name": "tests/integration/targets/proxmox", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/proxmox/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/proxmox/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6699c47bd7fd2f9624f0596548c50a2343f7867d49e2f8ebff7e2af12074ad3e", - "format": 1 - }, - { - "name": "tests/integration/targets/proxmox/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "083522361472fabfc894312a9185f6119978925f58fdc221fd9a79782f3190c8", - "format": 1 - }, - { - "name": "tests/integration/targets/python_requirements_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/python_requirements_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/python_requirements_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "952e2b5d7a62284a42c49502825aab37837fbb7638f4cf56d74cbc0002fa96e8", - "format": 1 - }, - { - "name": "tests/integration/targets/python_requirements_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1", - "format": 1 - }, - { - "name": "tests/integration/targets/read_csv", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/read_csv/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/read_csv/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a68f6f094d91bccee8fbf7e4fe4bf3537d3d142a242548f83e563c91279a7606", - "format": 1 - }, - { - "name": "tests/integration/targets/read_csv/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1", - "format": 1 - }, - { - "name": "tests/integration/targets/redis_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/redis_info/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/redis_info/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "27c8ef451c7c17943869974bc71737291faa7323907811694fb623700ceb3b9b", - "format": 1 - }, - { - "name": "tests/integration/targets/redis_info/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/redis_info/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"7875cb86e0e75d552d4840ecc13a97c949eb6daaea319fd2f1301aa88701f593", - "format": 1 - }, - { - "name": "tests/integration/targets/redis_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/redis_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "11efc219e584c5bf8ad5daa37b3fcc3ce2d8c113cfda448dbba21a2da714ddff", - "format": 1 - }, - { - "name": "tests/integration/targets/redis_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "23e223be5470925ec403c01861adf6a9ce98b6627243d6ad2f58385fb4d694f4", - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a33d5bfb4706c92f7f057d17e70b799a6464fcceb2c3b304c51b5bf9f6cc8147", - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/files/test_job.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ec1cd70a835eb80f5b14d948010e16855c7a814458c3ed4568acadec80510bf9", - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "411c22afce0081c5544732f2174fd78c1c1287451d06be67b49d8779ded9acb0", - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8d579efa9f70e445ef493e339e550593a57b1d5e3b9d3dc1afee4ac799141070", - "format": 1 - }, - { - "name": "tests/integration/targets/rundeck/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e7e7e1c03c776328595ce59e9cd526b5b09e671204d83909410d91f3af01a344", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1d3a9ff0da05429f37941231a7266e8a09cf2c716007611457b9a63e47226ccb", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/tasks/ip.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a3d99eb46240db6fbb210bdf710969729176a4dc0e4185ba51a3a882f054e659", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"5129fae409fe4b9f3027ad5a696792c2059215cdce03a473beca9ea7638d5891", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/tasks/pagination.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "46ca5a3f14a6f20849abb0fe518a47b5b6a2a1b188c6bcaabd23f50e3ec4c52e", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/tasks/security_group.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "68f1db98323b94ed628040877036b264158152279fe11a7ef659b6ea237980b0", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/tasks/state.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "50868f360a93cdabe4951870e52df684d59d09ea5556b41e4a37e6db2e7be4ce", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_compute/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_database_backup", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_database_backup/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_database_backup/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "853493acec7a353030441990b6da91b04005237db30a3475a7782e568397ef78", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_database_backup/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_database_backup/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1c8c7c1f542d472c44c6e7da02d21be33974e90f6dff27f359ce7241b1afb693", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_database_backup/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8e636666bf86c5da11a1a862f00b9f523f3ec9d400239b99a47df4474fec963", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_image_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_image_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_image_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7c3985e1f44c3c44321b6c0a2d578a12d898521f5c81f591b5669b7a022721d3", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_image_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f6f0b020f9c3d4b1b4e1a14919644cc6c469401d2e7fe4ff57c75dfc3e366131", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip/tasks", - "ftype": "dir", - "chksum_type": null, - 
"chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ce9f220650b042c8eb5b61a02904715882335a702d41b7f5c99d1e907c8daff3", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7484dbefebee0ff6f0045d863158fac6e302433247d115f0e8144be531428ff1", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_ip_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_lb", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_lb/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_lb/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "64a63db5e3fcdd9491ae50fb45f1e01dbcbf5e8f5d52a89df9ff132226234f63", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_lb/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_lb/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "818c3c4aa67f6b54b1258d8594b60453f3383570316896f970fae6f5aee19222", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_lb/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_organization_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_organization_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_organization_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cf1e66b4ef6c0a2a7d97b312278fad5d954953dbb878a4574b79706fee898aa1", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_organization_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "8253d2c996e5fb60ecf54fcd9c13f8a15df898dd60f95c41aa2638bb34e0dfb4", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a812db5681f6477d26fd7955c0288e14da67cb1f5b151d3658e1b51324b9434d", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3bcbd121e3abea9835e7f51e81b2da5a98e346a3d80e1850726ea23910201b2", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_rule", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_rule/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_rule/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "98a9538a4d2fc1f89399150252e84060730fb20d3e34d5eca3cc91b8fe4165d3", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_rule/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_rule/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e2cbb083babb3647ae0eb48848b4f2b27e69f56930046dd5f15ce4c7c99cac0", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_security_group_rule/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_server_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_server_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_server_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bd1f9b47cdd018934487d38089de553fb3b43ee71400906964004afd774aae2e", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_server_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_snapshot_info", - "ftype": 
"dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_snapshot_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_snapshot_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a16f6e0af308cfd4dfc3843e498e5f6729990bef5c5ffc0b682e4e017bab314", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_snapshot_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_sshkey", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_sshkey/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_sshkey/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f6bdfb6b06c04764726c7a1ee3c97ac38476d2fe0d21de694a7f43d92ac48c20", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_sshkey/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_user_data", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_user_data/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_user_data/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f6cc6d53c9dad7749fa6cbac4a8201d0d26355ad194e184c5148a22d048d2e0e", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_user_data/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_user_data/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c1e1f3fe109406b19b4c82eaec06c7fdeabc3e3e627eff6961859dd8d6f28366", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_user_data/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8253d2c996e5fb60ecf54fcd9c13f8a15df898dd60f95c41aa2638bb34e0dfb4", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bc48f489deef2cff35e3b1fb618c6350ef36bf4b8f9848ef27787ff2992d7b9d", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume/aliases", - "ftype": 
"file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume_info", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume_info/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume_info/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "292b61a61eb9f906fe8341153f7608522fb698fb0509ecd5b3671e3d53de5789", - "format": 1 - }, - { - "name": "tests/integration/targets/scaleway_volume_info/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617", - "format": 1 - }, - { - "name": "tests/integration/targets/sefcontext", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sefcontext/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sefcontext/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/sefcontext/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sefcontext/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3e917715fd6de57a163d1c2f41bea7b96e23e2ad34496f175fa069f1264988d7", - "format": 1 - }, - { - "name": "tests/integration/targets/sefcontext/tasks/sefcontext.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8b153e2c6e76b42e11ce0dc3efc209a845402c6cf3d1b17fd9934e8a1aa2088c", - "format": 1 - }, - { - "name": "tests/integration/targets/sefcontext/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "467b9bc1e410a98e565e4903966195b8b9a9d8c76e1f88bff6b1724369d244fa", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_client", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_client/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_client/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fb7ca5e8206e2e10de97e975dc8f5de3cd43ebe27acb5eea3dba31132db7a10f", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_client/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c742157c2d638e509d901a06509289c0a19da50f514e2f059abb93d9f492d88f", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89b86a747d9095c1bceea2748aece92c504e4409ce53236c9898a64a3774a100", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler/tasks/pipe.yml", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "5b067919a3bee60aea22d1fbca8cfb57b99a8862d272c38f976a903ed8316d9b", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler/tasks/set.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "84cd589d9a596950c2360a26668980de7174d7bcbff08df6039ec310c578f5ef", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler/tasks/tcp.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63e32525bd29b6c499bd01a0a3804c267f6c71de066c86a1fe6c796d59ee0c75", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler/tasks/transport.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "64757ff83593f669dfc25bc41c1abb935ecb8587841be41f2dffb01296f76250", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler/tasks/udp.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6e280697b30f3fbcd859f3f561d34cb51cff5acb3eccbfa8ba9b1598a032e860", - "format": 1 - }, - { - "name": "tests/integration/targets/sensu_handler/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c742157c2d638e509d901a06509289c0a19da50f514e2f059abb93d9f492d88f", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4a7a029ca849c93b2891e0e35afb1ae117b815ff0c6696e4fa7d239b5a37bd47", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e7676624791dec0e0e6c8ffcd677435ae9a1f02c52eaeb7daa7ba03b72d4c52d", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/vars/alpine.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9534ceabdafc0d6e7f20f96e64bce87cea6c93ff4fa4d6a9ac078f854631658a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/vars/archlinux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "35491ab41fd690530bc6e3f1c7f17258001acf056c113d9f2e50b3824a204046", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/vars/debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aef7e744c306a83637e9395ff2f7aa375b2337fb8bf8b7656597f585ea469f11", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/vars/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": 
"tests/integration/targets/setup_cron/vars/fedora.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1f904e1f682ddb4e1dac223c46baaa1b22f6d0b1801aa6a3ff54d283f7b570dd", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/vars/freebsd.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6dc1f19ad13663f68075ebfc933bdbef4532c527a440b1a82ecad85442dffb05", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/vars/redhat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c19debd2669548a152b9b2da7e584f86bb5d5e51ffe6612adff775056cbc876e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_cron/vars/suse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bb743826acea65ecb3cec1af4c16692aaaf6a80bc42407f45a4cb219dd3e21b8", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_epel", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_epel/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_epel/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4013a5ad79e7944851ff6a5be0d25dbb2e4354aa6be08e3c435d7707e1d8576c", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc96f60f9770d3926de79613a52d26a89400a88bbb2680746b6b8546b20d23c9", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fdcd7485be383a77858e967fd1d53038375736553dd7b8d5579c7d6e49d24c3d", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/vars/RedHat-7.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2327ed2b66086f12d6644563781754dd9d8131ad9beef32f315e2f9861504deb", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/vars/Suse-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc8858e017dfc003a31b4bd8e20a7d442a996e94bca6882354d9cf9b7a43fabe", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/vars/Suse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc8858e017dfc003a31b4bd8e20a7d442a996e94bca6882354d9cf9b7a43fabe", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_etcd3/vars/default.yml", - "ftype": "file", - "chksum_type": 
"sha256", - "chksum_sha256": "42fb21e5a17c47ffdc62fa1ef9ae61f066ae3010150ad08f9ed877e440872167", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "673cf7c93e0a5773c1aed10345ddeb3cb2fdaac193b311970f0a9f1929c1ddae", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/handlers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/handlers/main.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f17b28729bd86bb98423f9bc6e16598b48540cb9cf6ed2d69f597b319aa452b1", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/meta/main.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5b3a4b611769090095ae6d6640f5a7ab139cbd83c9f1a06cef6668bcaab35d2a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/tasks/main.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "385cae6eaa6fc7c94a7f5399540a4f268748a3ed8df6ee839d49102820ecb37d", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/README.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1ba3b350ef99a406c2000e141c829d66bfe3aa11a572072a00d1f4438886d6d4", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_flatpak_remote/create-repo.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ab7ca88e86f4a63db15728eb50b27ec6604da8efd3fd89df8626494f3792b5d4", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_gnutar", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_gnutar/handlers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_gnutar/handlers/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ea523c40ea286f1458a7f4135dcc548e50ef105b03aae76c11696661742ec2a7", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_gnutar/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_gnutar/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7e8fbc4c57e31732b07eecb5c841956fc63abb50a723f77779e510b9f118e8bb", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_influxdb", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_influxdb/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_influxdb/tasks/main.yml", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "2fda0abb2d8bda4e8ca6ea30d7994b22a90871f7f7a3aeb7fbbc86c1d622fff5", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_influxdb/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b0636fb6ff7220ecedf7d3b481e221726663d418caf6fe7e8f3f6b1acd30ce42", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "acf40ef4897d85734cdfec310d31a05515b0552f9884e6afcdddfa3c25b57b11", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/vars/Alpine.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bd76a70eed20070c8ddc348c569f514fa9c52c6dc90f6496b7dc0539bcb5a04e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/vars/Archlinux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2910dbc11fb229686a3d1116e62a22a9f7b7f1d3d737c4b6ff16dcf351734fb4", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/vars/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5467bf8784847f9ae6e9da39e4935a32012900c7423e84e43560e588911c2e9c", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/vars/RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "502530ab512b2ecf51959f4e81d899537a6da192c7b4e6a88bf860cf950f2aba", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_java_keytool/vars/Suse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "502530ab512b2ecf51959f4e81d899537a6da192c7b4e6a88bf860cf950f2aba", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_mosquitto", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_mosquitto/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_mosquitto/files/mosquitto.conf", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6b90092ad37894754edbb4c4ab997e12f96f8a93b26ee58dd547cda7e1ae04a8", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_mosquitto/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_mosquitto/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "018110439f4fa79f060ac894fe54567fd9c3eb410aedbdf0b4aaeee1ad5fd705", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_mosquitto/tasks", - "ftype": "dir", - 
"chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_mosquitto/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6384c76b03ae445b891167e13407ca4915637387a6a9bc6c23bd1d3d92baffae", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "538431cedb49bda0ca4126c7091f3a46cf282e50094e14ebcde08e17aa55236a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/files/initial_config.ldif", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae3cbc203ebfd2d9a3810399be1923b2f8d41162068f92f0cd473033c86bc697", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "35f150a5a1d546167e9bff24e558439396208879876812cd1bc210252a86274c", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ef161491eeeca8084a7a9b169bae69bf2edb6a1e84b56e40870e2aa9026533ae", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/vars/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6300a7fa4f660acb989c8a12d2fc55890c00c89e8ce430b82dc8ac9f7e75acd0", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openldap/vars/Ubuntu.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6300a7fa4f660acb989c8a12d2fc55890c00c89e8ce430b82dc8ac9f7e75acd0", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_opennebula", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_opennebula/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_opennebula/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "87ce8a986194b78ccb7f6aa2fc8a64d36e6036e7f87f400530420961349b7962", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_opennebula/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_opennebula/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fc13dbd2b6322320932fe91c3127dd7bdf0045fd5922f4b24c4ce2f8fa4f1ba7", - "format": 1 - }, - { - "name": 
"tests/integration/targets/setup_opennebula/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_opennebula/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0a4c50dabbd36c9c08ae74da3171fa2953eb30fa8b4ed2eb6ee31eddaf6938ea", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f00155101aa0884fac290c2c6cfb78f3dd35bbd66ea323ae8c644fea513df808", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars/Alpine.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0cbe4792a30e8708b19d3e45506c379f7c7057259abcb1c15ec41568661b9dd1", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars/Archlinux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "91888bbcdfcc4f68838c129eb0e8d38c877943c5d091fbfb034bf56c5fc91f73", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars/CentOS-8.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3cf78020540686456379949d5c5b5aa61fb58d67a0b8a1e78ca167b531639ec4", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars/Darwin.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "705c4892e0e01103b5a23fdd4105897a36dc9cf4c637c112941fa8335ed2f6cf", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "493ddcecafb42d30d524867c87e06b25de9cea7ca3f70344204e553eb3be9e25", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars/FreeBSD.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d331cbb706303b121aa40b795b75d3e746d09944e98128fb6e1cfe6630f8991a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars/RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "30af4802c88ed30b55292ed0be717bf65222adbe96b6139d1292c6b5b7f9064d", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_openssl/vars/Suse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3115f51d4621ae5202defaf4df42219f25c2ac20115968d0b749644c58646e8d", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_pkg_mgr", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_pkg_mgr/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c12b1f4826eb17a4e9d6787ca8b340a49704b71b65a023f9a2e2e6ee469af3e5", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_pkg_mgr/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4bfccf8ff60bf5f1b2b39ebe088eaabb616620191f3a4049b851ab41e0daec1e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0349988df512a65575f9594c62d6d8a0aa0cea38ef60f75236e6c9c1bb075d58", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4e7839720cd873fbfbe855a61c55f4d69bf5154c420a5a776daccba0db0326e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "be49da51a69d0f8af9ad8bfd120189b95aa9feb2ea00be9e2f6e06af3a5c754b", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a16cb164d32705033b9e7a7c4e9b8050de79c561deddbcc8603e8d0d59cb563e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/files/dummy.control", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e8000b3819e84f8f1af575e137e4f478bc16cef5b0b11867f4d348840ea34bff", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/files/pg_hba.conf", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a1d8fd0e6645d939cf0fc5a67738039e036f06c540efeb8a18bf9fed779ddb40", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e70e5de9259cd000c3c58af07da1047f5101929c2cad733c2467096c2227a2dc", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Alpine-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "74a386f6583f065cca5cd8c4b256bd500cd02b1daa82d1f8b23c213e961e0662", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Archlinux-py3.yml", - 
"ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2292d5f57c49a6845bb162adbe5fc94006eeb421c0022e65c3194930f2999d7f", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-11-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "96f1178c9a80528ccb2681b5b0f5a8262eb0861f65cc27dea7831e49f26fd43e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cec5e777df87e1ef7dfac426286cc5a26d3ed9bc8d7e4e3a8c307f6d670b5edd", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d47932fab9e40019bcb0a06d39d747c1fd6e79147de58fd879b391f6e88b5b43", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3aed68dc0d315a161453b95ef5d5fc2e386fe3569386bc1620128bd59e955afb", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d47932fab9e40019bcb0a06d39d747c1fd6e79147de58fd879b391f6e88b5b43", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "641d72a4f0cd5bb640de8ed043befc0cadcf9c70cc399f0a1485483e32c35fe7", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2c2ce9a5a95a2c26bc66112eec85850bb0c06252fcc7982bb02b7faa70de01c1", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b8b67d7d2dc8c0caa1b89de5338dfabcc75e6480ecc6cd92bc26da43affd9568", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "40a6304f5e4cf6e2baaa10718ae657c1ca67bb1cf127bd971b2a438d6c64f215", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "510078001c894619b1437c45e647391781d9fb1a17bcff5cb26d7939a4970a16", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "83efd43f61060d2f160e5a2fa8fcd185029672112068fc103f0e35ab384bb8b2", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44b26cda219358db0bdac8d4df06f14be1405c0ec75c9cd066b79a84fd97990e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3dff2ca237b634d4731dad7c812af330acd802a9aafa126c1ce623d80a2330b4", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "be5506a46fa7e9d06827fb16fcbcb51093d07c4e50e476b928bd285e4d3d6d60", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml", - "ftype": 
"file", - "chksum_type": "sha256", - "chksum_sha256": "92b075e3829177e0a0163e38e7a65f108084d520ac1d4f55031c4b574654a7af", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d7d2e4563b1a21ad4f7bb3a0d0b6eb26ab6f9a51e86dc0ce3c7c44d3458b85db", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/default-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bd40f6ab072c5b9d5c83d83595fc6a6489dfc8ddeb4c470b01d8b6b3d539b361", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "64f6e20d24c3def6bae446791f056131c8d272d6dda5f354ae63bfdc415e0819", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d605d18b50ff780848a7647ab0ce8170fe8e3904c3a268ecf1f185aa33db7796", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/handlers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/handlers/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "42190d97acfca9aee10d326ad2453f81345598d582785e90975e3ebee4f40884", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d1111d58412ebbf4e3cdb9e7aa784b73047b602f90c76c9151c6d4230d99f933", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bcc713d4e0cc586d7f5fd383c716a4e8f3ac78f2bece80a8094afdb4a25be897", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ee9f476b10468f8c9a116254fb6687d0fb6cbb364fb5e6e7bf3bd490bbd013b", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_constraints", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_constraints/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_constraints/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_constraints/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/integration/targets/setup_remote_constraints/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b625673c8e7b2a5dced6117b01aec6f25452246117746bd23450dcf389a61883", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_constraints/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b34f7e77b6117dd66a49957c17656b22e5dfa444d8f93af2b4e1d7f1450a5a3d", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_tmp_dir", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_tmp_dir/handlers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "050157a29c48915cf220b3cdcf5a032e53e359bdc4a210cd457c4836e8e32a4d", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e273324ab90d72180a971d99b9ab69f08689c8be2e6adb991154fc294cf1056e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2441ac1753320d2cd3bea299c160540e6ae31739ed235923ca478284d1fcfe09", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bcb3221a68dc87c7eae0c7ea50c0c0e81380932bf2e21a3dfdee1acc2266c3f3", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "45abf38083475f7c347146e289533e59b374dd8735446a71f301de517b031375", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a3a79997e5e6fa57f17238080b4bee234aa15cf9f978f37e99be60765711640e", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/vars/Alpine.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "659f8db0e03052dde7c92fc94faed130a80f8e56e7ea4d3b6bdf1d1c14484faf", - "format": 1 - }, - { - "name": 
"tests/integration/targets/setup_rundeck/vars/Archlinux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "324b75fe99cce7878ca4b253ef023c4341fe9c5482a371da999e5ef37d2a24bd", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/vars/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "85bef73efccc60527136788d1e9599414cbe28283c1f2ef50f493ce4886e34bc", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_rundeck/vars/RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "02353942fe24748ec6c1bf247afa3ec507e28954e28b6dcf60264b4988bf7c98", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "488ff8657b223511262ef52793496b9efca1ae9fd468dedaa9bc48950f73978b", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/handlers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/handlers/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8a550631c3cf2d8a29d3bb2ca1fde7756fe733a497220a4ba47e5d3192ea000", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff95c2df57e2d968c47a85a41a0586572bbe3772cc9870e0045f4b3fd83fd58b", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks/D-Fedora.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "546288ee457cb0cb0940bc34228080a6f215b4638f1ad5c61a9135cfd68ba529", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks/D-RedHat-8.2.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "710488192c243e9d7ca534a825549d15d454d53b756477d78bcda7fab446ba4a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks/D-RedHat-8.3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "710488192c243e9d7ca534a825549d15d454d53b756477d78bcda7fab446ba4a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "546288ee457cb0cb0940bc34228080a6f215b4638f1ad5c61a9135cfd68ba529", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks/RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "546288ee457cb0cb0940bc34228080a6f215b4638f1ad5c61a9135cfd68ba529", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "546288ee457cb0cb0940bc34228080a6f215b4638f1ad5c61a9135cfd68ba529", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"00b7fac9bbd3a4ee7fee27c210992cded4d331dd1cd4d9a5409be22cb91748b1", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_snap/tasks/nothing.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "710488192c243e9d7ca534a825549d15d454d53b756477d78bcda7fab446ba4a", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/files/ca_certificate.pem", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "47ddc514d030d2dd28b98eb257b690b8aa94abc7b657b43caf6e32e2e5a6bf9d", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/files/ca_key.pem", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0157029faae2207eaec99b67360db8ca46fe6964eb98165a0ca4ac56cbed7ebb", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/files/client_certificate.pem", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1c88ee01e59fe19f497b74f0fb15a6d705bbac6df554d16f2f80fc25d2723bad", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/files/client_key.pem", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1ffc8420355a69fecd60242feb89bfef5517292aa9129ea79e99bb36ffd80dc6", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/files/server_certificate.pem", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a93a860161059bf8b6d065d2b01a5218a7beefdb075fa704e0139d4f96bdb61c", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/files/server_key.pem", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0bb0b33983d37d5b6404c0feb969e80d0787331f774d2b8024570133d65851f6", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_tls/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3b7623cd0bfcfa8b836f0eed7e40c6546f781ea549220f409320fd3517590694", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "10a80fb6bf47c167c0d3546ec8b477a32c8d9a92767d62d3c1d0a77132838e42", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/files/wildfly.conf", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f8c07bbd656b9d303974d536af56b75593c9b831d17ca17ba7af2c14502b7be2", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/handlers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/handlers/main.yml", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "cfb6a9b45a8f36d652d845b282219a344b7a53c7474b27533e7231a1c736dca7", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "847bb6c4dae501f75ec017de8302d70c08bf23548a82058650b1fbd1180cd218", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6e26de7ad0b193acfcc863b4342855ec844466c84d864c21da7aa05c0d00cfd7", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1aaff5c06ef04fcbcd51df947fd85c94dede66e35d188166a03678720ba6bc56", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6199adb74eafdedff83e41c4377b4c778d1c10773461f479c3b63eb2de90014e", - "format": 1 - }, - { - "name": "tests/integration/targets/shutdown", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/shutdown/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/shutdown/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "236815aaae155925406da88d56b84bbdf2c51c3cdd1385ca2b4107e761de4950", - "format": 1 - }, - { - "name": "tests/integration/targets/shutdown/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b", - "format": 1 - }, - { - "name": "tests/integration/targets/snap", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/snap/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/snap/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "83529c5d557f459b40be09fc10dfc4cc4e688e127ff456b6aa129b9bf9dd1c90", - "format": 1 - }, - { - "name": "tests/integration/targets/snap/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/snap/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ded2d739d28cfa5095266e709cf4cac36227dec931d1736a3feefa264e8c62d1", - "format": 1 - }, - { - "name": "tests/integration/targets/snap/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "145304c99af09bfdf3c3cea216e59ebcd872946b2750c1a8ad5f295e2260b979", - "format": 1 - }, - { - "name": "tests/integration/targets/snap_alias", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/snap_alias/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - 
}, - { - "name": "tests/integration/targets/snap_alias/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "83529c5d557f459b40be09fc10dfc4cc4e688e127ff456b6aa129b9bf9dd1c90", - "format": 1 - }, - { - "name": "tests/integration/targets/snap_alias/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/snap_alias/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "82b3d7dc5692c55c426dd75d19eb982564948283f89724d302c758cb21bd4953", - "format": 1 - }, - { - "name": "tests/integration/targets/snap_alias/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "296d6119668067dcedf82891126264a32e2173788ea36061bf76bff8396f13b1", - "format": 1 - }, - { - "name": "tests/integration/targets/snap_alias/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "145304c99af09bfdf3c3cea216e59ebcd872946b2750c1a8ad5f295e2260b979", - "format": 1 - }, - { - "name": "tests/integration/targets/spectrum_model_attrs", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/spectrum_model_attrs/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/spectrum_model_attrs/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13092237372d3237348001411f7d1248f09ed49eb37723cbc04a777c57437ca3", - "format": 1 - }, - { - "name": "tests/integration/targets/spectrum_model_attrs/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656", - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/files/fake_id_rsa", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/files/ssh_config_test", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "441dc00ccedb1b0ca62ecbd9c59b32154abb321c558e19e0a8ba04a8ad213e34", - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9fdaa5df59486f2bdf7e9e56a9136b60182bf48bd326b62d4d28db615ec96c95", - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/tasks/options.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "abecc6dcf4a246368324d00a9f981a0f5b3b5b370ac85cabd0ae133cc9dfb93b", - "format": 1 - }, - { - "name": "tests/integration/targets/ssh_config/aliases", - "ftype": "file", 
- "chksum_type": "sha256", - "chksum_sha256": "1880a68a324959394802537865a680603cbce4018675f70f6329281d365d96a4", - "format": 1 - }, - { - "name": "tests/integration/targets/sudoers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sudoers/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sudoers/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2b9e883c9eef20446e3a3e460c1b4c391680e57f06dc6459a848abac06462539", - "format": 1 - }, - { - "name": "tests/integration/targets/sudoers/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/files/sendProcessStdin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bb8184512251663d37e1c79ba14c7d962dbac0cf77a1af593456823c223e293c", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/install_Darwin.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/install_RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/install_Suse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663", - "format": 1 - }, - { - "name": 
"tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/install_Linux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0fc88192e6478dd7c0224de6281f6a690a753ffbb6df1d4e114a7e3034679e27", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/install_pip.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89d2fd307d63ea189cba1c12f93e7fff2e03e6abb68bd0efcacb0e5f77a1efbf", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/start_supervisord.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7761c3bde37c76ac85bcc6de9bf55467f8942084209edb5f52d9cfdd9af76a0e", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "adb0abf951962116ca6e1843f1721b4490170b61dfb1a3ac4ea6f4aa3bf5f706", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ea89555b19dc74fff88355d178fdf021dc33def2f6caae51f2c40b029e897b43", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/test_start.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "40c50eeba50d448a234a6c7fb6c769c813c9d5b6041a27d58528b2e120efdbe8", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/test_stop.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6a2d240f9951ce2c4bb2363a6d70e5038da5607af86055c08cbf064034551dc0", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "151565b1b6437c5330d7b1619da7dd7ed96393e5366d7eed6f6bb023ec0d7b90", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/templates/supervisord.conf", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "877476ccc7f51038a097f2e52ac552297c387e71c37a25d523ce6f551b6e9591", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/vars/Debian.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4b73003c59b63fbb82a349cc0693c5a352b9a232ff520bbde67f0a76b947c909", - "format": 1 - }, - { - "name": "tests/integration/targets/supervisorctl/vars/defaults.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9a0893de74514778586ad0c0240c024b26d16c0c8c4c7eec89fe89d7a475b752", - "format": 1 - }, - { - "name": 
"tests/integration/targets/supervisorctl/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b268603706f3146929caf02b9766fd864cb33d10c1be9d581ef762640ad0dc26", - "format": 1 - }, - { - "name": "tests/integration/targets/sysrc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sysrc/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/sysrc/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dd52e39be13eaea8f94a65caf2c9bce54bc0917122ec860e494daf41b91d100b", - "format": 1 - }, - { - "name": "tests/integration/targets/sysrc/tasks/setup-testjail.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "711db7c1d698d974d4b3d58a0ecd3beaae49aa9dfea4f89e9e305c45e28dbc06", - "format": 1 - }, - { - "name": "tests/integration/targets/sysrc/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "80fcac1bf73988cab479c3db85a45b2bcf71260e19a6e691dca88213b7c83e42", - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/collections", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/collections/ansible_collections", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d", - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/galaxy.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69c0cb85c493f4a56758eb814a9c36104cf36c449a0e54d1a6b4b72bbda01ec1", - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/library", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/library/local_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "853a4708a3d35eec2ffe537982f938eb947da2faf1b405a4690b5b6a2ed5dc7c", - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6", - "format": 1 - }, - { - "name": 
"tests/integration/targets/test_a_module/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "737ebeb3884ca0395d497b4d357dfff97dcd1463a494b2fbb2e8dfaaf1b4251a", - "format": 1 - }, - { - "name": "tests/integration/targets/test_a_module/runme.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "566f7be489b33ddce7b23f85cbb3bc4793cc0317f72ea634af8eb40efb5a199c", - "format": 1 - }, - { - "name": "tests/integration/targets/timezone", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/timezone/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/timezone/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837", - "format": 1 - }, - { - "name": "tests/integration/targets/timezone/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/timezone/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "47f761013cffa238fe7683007d036d0e5a25fcef8b473dfbaaf1c26ce5f20265", - "format": 1 - }, - { - "name": "tests/integration/targets/timezone/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "73ea1b70fd2b5411eca231acea9212ac2e2a0a3eb2ca93618638bd88108bfb4f", - "format": 1 - }, - { - "name": "tests/integration/targets/timezone/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "de4b021a7ffae564d5e878e8a94a4121b367cb3b51a9b0dfe2b08943cdafc492", - "format": 1 - }, - { - "name": "tests/integration/targets/ufw", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/tasks/tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/tasks/tests/basic.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cbfd03a4a6a79672ed38e204abed663ea00315e59a26d7d7b5acd166efc16de9", - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/tasks/tests/global-state.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6adba710aa58f28cd81d6a0e3620c2fc38587ef14b3e26a85f41a7dd2814b20d", - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b28169e97fa8a69653ad91f5bc21cc746d26c1c310170652b5a94d9161fa6064", - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/tasks/tests/interface.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3bc22ae0cc6f0384c2f29530b9cce48be2fa992ca72367fec2887f367f6899fc", - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"071fa18e8ee40f0e0aadffab2ad453eba19945ab310fe864df2b478e3006ad9d", - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/tasks/run-test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bab5dae8b59202497a94d3a13d9ed34aa200543b7ea8d8f0cb3a24d16b115fee", - "format": 1 - }, - { - "name": "tests/integration/targets/ufw/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a8fe3f6351eba91318e83afee3686003438a97bf10fa9d77330e99742a5445d", - "format": 1 - }, - { - "name": "tests/integration/targets/wakeonlan", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/wakeonlan/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/wakeonlan/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0b3c431835de84d4f4f33d43d8e3a9ce9fabe19ff24b0fc617876c11b7e97208", - "format": 1 - }, - { - "name": "tests/integration/targets/wakeonlan/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ffe208f20c1cb038da9750e438e2377f03d31145867413d919e6a025c15d270b", - "format": 1 - }, - { - "name": "tests/integration/targets/xattr", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e1d8dfee07d5c920e531fe96a06e527189deaf13fd4d684ea513339a64dd29a1", - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7", - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6409d500eead57967d4fd868cb5852adc1822fe58bd1ed0f92f2ea764de50c54", - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8fd614bf34876618e9ca28dc4c49818fdfc0a7982ac0814e28df3741af5930df", - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a010da6a9ac6709b5e4fb53ebd960462e0e8afb9d5564dadb4dc013b21b91c3d", - "format": 1 - }, - { - "name": "tests/integration/targets/xattr/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1fbcab4b8d4b681f9278736b73af5b7e26c18d133f4c6df700158b2be244904f", - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"7fcbdf40bed8e4180a0f571f5f979872d1aea52c476a80994e7f4e3a488e9225", - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/tasks/gquota.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4f78ba14c8dd91da0a4fca770577755f07aeecbad8df8acd5a96b7dda65c05cc", - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1f3028e8eb861d0a8a56a98f1835b4bbe784726482a4dbeaef5a2eeedb28f26f", - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/tasks/pquota.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f5144a0c5a5f2fc1f0181a699949544342979e6d6e20668070bd263b07744747", - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/tasks/uquota.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d796e8378720c441626f4f9dff7e13f5f7d7aa16e3acd73a4618c01abd8e289b", - "format": 1 - }, - { - "name": "tests/integration/targets/xfs_quota/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d2726eddce66cc8903ec22708217894083028934ccc9874b779234699c822298", - "format": 1 - }, - { - "name": "tests/integration/targets/xml", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xml/fixtures", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b88157804ecb91179f87676a83ba7980af70efe935b17d39c18d05c298f57cf5", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c66414fe6d4b64014dbf96c994c07cd97b38e03e6f476cc0d9f0ef27ddc96df2", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "63dbf18a99b1f1eb359e912bea594f9d2450438068213158c145d3c815c9f0dc", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xml/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-children-elements-unicode.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "18833a8f2893edcb2ae267ed7f0580d06475e7da070c4eecabe16435fd98b0e8", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-children-elements.xml", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "6d479f1699c9dfed26820f218e0642689c9a7b75f9df8a49d22158ec117f0a43", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-children-from-groupvars.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "463714365dadbb9920967fa83c913702ffa1e5488e86624456970b4ab8928b9f", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-children-insertafter.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f12f8469913b495e3138ad3207c4a228eb99c584b016021afff0ebd565033e36", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-children-insertbefore.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "54632a063875c7558eddb674a785edd2ae3d6360d0988912ae3ee3f50c6f0082", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff7167bef3d711a8dec1572ed1128746ea63cc69ba51257bf59b56f00113846b", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-children-with-attributes.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "adc4d6df33b14a6b2dcbf9e2df9ee41c12f31f2690f7b198b7ee810ec29329c1", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-element-implicitly.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2f51c7ddee9d1cd6e1bd7ab58dfca1bd58d56f1a27bd3bdecc49428a6a58778a", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e50992baa59f7a611e4ef08211dce8847618ecbd0b786fc01a17b41330405200", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-pretty-print-only.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "36e6ffd3c5397494980ebfe9771ba624b7d920e3ce3d7bb843f364675fbcddb3", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-pretty-print.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "824b2a646c1c901e70bccfb7e1ee63721c9e8cee7994133dd166178d53e67065", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-remove-attribute.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7e38225db2b6f8a1c485a328ad08a8b0c24ca3b017dfa68366cd609991b9104f", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-remove-element.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ecc4e61ff85be0098aada5efc9e3c0d8424c98baff4f8901d991ae08c08416f2", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77a5e85cecebfe2a8fc432d8bbae1aee6b313956f3f2c12383310ad151b6fcb6", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-remove-namespaced-element.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e2571bd28e49e82ce1b494fd2162f89bb82947615a9e775a1f7399df435f3e19", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae94b872105fd18f6cee5e0cf4b9b0527757a6da94e52638f2e492dea88f2034", - "format": 1 - }, - { - "name": 
"tests/integration/targets/xml/results/test-set-attribute-value.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c25c127ada5a292507a55c36919bc801edac4bcd6f85807727e1cd76e1e5cb4a", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-children-elements-level.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "91ad02bae7452891174b2178d2f6556e0dfc07a5e8f491d98f0e11efece9b1ca", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-children-elements-unicode.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "600316e3ef64bba85725621be258991fad9477b00b25422461aa59e703363291", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-children-elements.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e70378485034552978d9bd0b525729717e75c33c73e947f4c02779f6ca8a9fa0", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-element-value-empty.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b492c8b81778ca3793f788cdc189c07f9170e4b75e63b49f09768611b2953868", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-element-value-unicode.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aec28ed22238a57708d1c7e4b7416e3fd64b91d7ea679df3d511d0ff4d78d794", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-element-value.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d68db62d1a5fbad0082338ef7f7743ff4c60e1ba452d507496a0df95980060b", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a06edcd61c987b3076806449dc923591867f6a38b059ee1e9582a165f7f6fec8", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/results/test-set-namespaced-element-value.xml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e5e5f1d03f86bec1b16af94fea829c2303a1fe38050cd7453de87290e7b2d0dd", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0b6b1416a0a45fcc8d0a948ad10fc71fc24801dad272ed44be6c585708ae997c", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d0f0891aca5c0e0b9951ec8a11f3ad5b90fbcf83ebef449808119d8a6cf370be", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-add-children-elements.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89a2e6ade7c9a228edf872e867ae671ba76eef9395212e1f12f5520ab6bd3f0a", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "14216bc42837eff570a9c5e9296356beca1ca001546b7a0a150dd1f51128af89", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-add-children-insertafter.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f24542f28b9d92973666d0c144e787f0fc0e328f9aa7fb2b9854e2a851a0cf51", - "format": 1 - }, - { - "name": 
"tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97be423b5077229069a36e3aabf83c2eac677b1c2d634fa6e4f94c3c0ce988b9", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f1854586757f483170a4dd5e866f3bea185499b3c89babae051da70e585814e1", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6d089d03f6f644ad6a6406a80cd3190acbbf4e5e518883264e30c0dd27180510", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-add-element-implicitly.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "84bbfaf0c30b5f06dc2b6e8b59f74acc6c239262af8e5cf3ed7a3a9fee63ce02", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c98aa13d97b97120b17e3c73c2e2ad16b17918393241788ae2f91483fe02a755", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-children-elements-xml.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e57b3bfdca4086e6518a93ce5043cde55573df0d2143eb6013263ea5b5b9090e", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-count-unicode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad8e481bfaeaf23fbd794eec46f28637ff748b6b012d59f316bb860897148158", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-count.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7cbaa1b1e1c4b2aedffd7b185541cc20d61ba770e7ddb068db1371b230327b8d", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3635e6d9ed3c8735c69bdd94961c1d553ba78428f041f297c7017eda55fa52e2", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-get-element-content.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8a1b7f7542375ec806a24385f2b73c9359f8b7839c115b634a9b4430da7298a", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ac7da5aec1036eb83ead426dd96ec6d31eaeccaf842e79cdd5a9ef74baeac172", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-pretty-print-only.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8b5e11acdf67fb9c058cd2702505465a988e4b52b1cb3be7ae45adda1dfd3395", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-pretty-print.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "464e1ebfd5c4e66743097b5e73ea130595c6c0fced413a52250598ae47aaaef6", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f32305bf4e3cb3b859cbd431b871017a5ef8d2240c5bb4084f34696ee1032730", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-remove-attribute.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2674349d50096cef7523abf3c2e6ba919b11d14d93a2b0c4f0fc0ec8704c2440", - "format": 1 - }, - { - "name": 
"tests/integration/targets/xml/tasks/test-remove-element-nochange.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4b1a3ed2ab4af9dc63189ec807f7b468cc2d4b80ea3b6421e1b71127a3d5b5c9", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-remove-element.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0f4083387b79d4c7ae030d9027e4a7838d01250f783290f0d3a88542e2c52777", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c56834fe7ee6542e55b63f8c869463c749891d2bc5c496dd12d568a4d1b3dc7c", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a33854d8fecf679f3d76c642fd1379b3c844dbbff6329c44033eca5eefb16e79", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "393cf5c8c8d8e602289848e184f51f57384ce3543ee2d1f2a2e2d79e3e75c6a8", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "811d231361ffef9be6f36a8b3fe56e4941b50178336a894a0b4887174bddb0f0", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6cf55191c2f195079fbeb6270c8b4170f07d8653283beef8888a5e05351448fa", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-attribute-value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "47198cb89249a2c4ed003387b1cd9887ffacc925d42c78ac3a6a0edb15d5af77", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-children-elements-level.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13a9de3dcb2c8f6b8b737d77fb2cca32f98d1d8647cb85b1309e13071e461952", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a38cc08c8a9a3bf53564a9955fb3387fa64a4bfa37c9d79d49b01297b614b562", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-children-elements.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7b97434f0c2a16a4aa3ad6e51ee2d6665f68a2acfba75f30bd793f660d4033d6", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-element-value-empty.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "306e870f3299ef224e8888ea3715fa0dc9c69942797fe4486ff03d734d00cfe8", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dd98195dfb5a4f33831ce14e47e2ac74cafa70282c1f6b187baec61fdddbe6c2", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-element-value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a335de101750c51dbcf94f68764561d04f2a0907ab21ae99ce9a3fea43ef030a", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"cdbcc1db25334596337622fbdbe457587be5eeccec5fbbcc84f4cd9f925c7f4d", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b264d9c3a40cd702c036714a2342ab876615252435a2e7edb58c90d924a38f7d", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a0e7d085bdd8ca410bcae7e7cce35b27216509c24de1136f86e89a0e23e6e08c", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/tasks/test-xmlstring.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "11ddcc6fb2e10c2c7f38f95585cf69b4c04161a1a1be1d01d344063ef8bdeb9b", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/xml/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1dfa0429b7bec0c9e705c2b649cd7a1c5a3a9301984b6b61ebb93a932acb0b60", - "format": 1 - }, - { - "name": "tests/integration/targets/xml/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b6e6b3eab89eec157e047b733c9e9c8b2ae7ec87e514ef9057018fee6fca9ba2", - "format": 1 - }, - { - "name": "tests/integration/targets/yarn", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/yarn/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/yarn/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d8fc45c0fa92f9e855eaef341b5d10d9ee2630f93bc4bd041a943aa8c1169b3d", - "format": 1 - }, - { - "name": "tests/integration/targets/yarn/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/yarn/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b265495436a6c3f9542ae62e07d8178e4f24705abc5368238169aa87f30203a5", - "format": 1 - }, - { - "name": "tests/integration/targets/yarn/tasks/run.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "69cedca1e383fb004b707ed3b1363bc9ba6ffd06cfbc44d9e047c8baa394a4ba", - "format": 1 - }, - { - "name": "tests/integration/targets/yarn/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/yarn/templates/package.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "664c8935b09b77dee0b4068cd201aa26923567264026632473acaec6f90fc4b9", - "format": 1 - }, - { - "name": "tests/integration/targets/yarn/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4e99bd95e5ccbbc3157a4036b5a91bd46bb22457eca85b713b441cb0d4f0b9e5", - "format": 1 - }, - { - "name": "tests/integration/targets/yum_versionlock", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/yum_versionlock/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/yum_versionlock/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4a9c743574545c7d170ccaf37f4359e6d922a6b62461b4e63389b84826290db1", - "format": 1 - }, - { - "name": 
"tests/integration/targets/yum_versionlock/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c1086be775ef488ec8b2373381fecc0f4383a03b80abb70a520198fe460e16df", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/files/empty.spec", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "77f59f1c05484da8dd181c8158c7ac48b5540a9a308c5f3872c52960c6317450", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "978cdbad8d0ab5434c81bf5ebdaa7b66b1b99388a742abc871dacf11709311c5", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/tasks/zypper.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "956d4cfb98b14577dcbba4615a22ca3adc54ae919f3dbd7f4affea1ffb1d8d60", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/templates/duplicate.spec.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c5401528520b9f3ee1e8ebba24e66ad649c2e95130f184508d023b82be001c7b", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d502a4aa4e2eb90aa8ca5b3574abb191b74f24f1f4d15266457c616538367c26", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository/files/systemsmanagement_Uyuni_Utils.repo", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b306054fb8fa3adc485920c54b66b47255846e7cf279569c53a03903b46fa4b7", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "925247336b1a937fc2d08e3c873d4598d214a3796b51da04d3835321bc41ce30", - "format": 1 - }, - { - "name": 
"tests/integration/targets/zypper_repository/tasks/test.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f2e1d876236ad6d7016d9b17f72b16d759b6860927fbe4ec225531ec83667ec4", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository/tasks/zypper_repository.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4c5ee78440146ebc0bcac0e6d6f920a19ef05ca479174cd99b163f647ee136bd", - "format": 1 - }, - { - "name": "tests/integration/targets/zypper_repository/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d502a4aa4e2eb90aa8ca5b3574abb191b74f24f1f4d15266457c616538367c26", - "format": 1 - }, - { - "name": "tests/integration/targets/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/sanity", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/sanity/extra", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/sanity/extra/aliases.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d825699416551c4276f44b44c20aeef37c9b357d7711c55cd15ee12dea293907", - "format": 1 - }, - { - "name": "tests/sanity/extra/aliases.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f2116ad77622cd360af4506c299d64679e2346b58f03359a344a871ff8247b1b", - "format": 1 - }, - { - "name": "tests/sanity/extra/botmeta.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e56a83a42ca5002a300003f6ea560a036c684768e839c228af08ce501ac03b89", - "format": 1 - }, - { - "name": "tests/sanity/extra/botmeta.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f4de513bc44d78d4805850530229cb86922f5430c8fa276d0c329c4430f4a1fc", - "format": 1 - }, - { - "name": "tests/sanity/extra/extra-docs.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "48c2f15e3fe2e2bcd9b8c8bd7f5f1643d78f16b822d63befd88795fe29bdac3c", - "format": 1 - }, - { - "name": "tests/sanity/extra/extra-docs.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2e5e4a1b1fa91ad02620188230cc87a7e4f89532e572168590dc93227050f98c", - "format": 1 - }, - { - "name": "tests/sanity/extra/no-unwanted-files.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a3d3b17f699b042958c7cd845a9d685bc935d83062e0bcf077f2c7200e2c0bac", - "format": 1 - }, - { - "name": "tests/sanity/extra/no-unwanted-files.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "15053d269214d7c1427fe861351a3bca96cf1ff9026f8aa8e8c73ba5f3cbd95d", - "format": 1 - }, - { - "name": "tests/sanity/ignore-2.10.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8f0361a30842d521f9fa3377121416fb10cac52299375d2e145295ae81e0af56", - "format": 1 - }, - { - "name": "tests/sanity/ignore-2.11.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f9261ff244e74897601beadd8c3baa049e8bd209371b4524acf8d21990052210", - "format": 1 - }, - { - "name": "tests/sanity/ignore-2.12.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5d301f91150599b4bb12fa753980841c5f3d2d75dcad6bb60d19066c81dc1d78", - "format": 1 - }, - { - "name": "tests/sanity/ignore-2.13.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5d301f91150599b4bb12fa753980841c5f3d2d75dcad6bb60d19066c81dc1d78", - "format": 1 - }, - { - "name": 
"tests/sanity/ignore-2.9.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "84c5951d7a09b87481a326a1ee032cf759090ff113e04a8716db5ce930db7ceb", - "format": 1 - }, - { - "name": "tests/unit", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/compat", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/compat/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/compat/builtins.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1", - "format": 1 - }, - { - "name": "tests/unit/compat/mock.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", - "format": 1 - }, - { - "name": "tests/unit/compat/unittest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096", - "format": 1 - }, - { - "name": "tests/unit/mock", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/mock/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/mock/loader.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3452ac615f89c99a76d1df4ab1ad84d1aff546e5b5fde18034a241239690d05a", - "format": 1 - }, - { - "name": "tests/unit/mock/path.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f048a12629a6297a676ea56529ecf766cff30bcaa873c6659ac5b7f6e29472b1", - "format": 1 - }, - { - "name": "tests/unit/mock/procenv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e79b2fe520af92318da175c231296e16bf047842a93b1bfa4e1a5afc453baa03", - "format": 1 - }, - { - "name": "tests/unit/mock/vault_helper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0562db7b9972e6378701e3492c623e5f881732c4792e096032b72c2e54d22298", - "format": 1 - }, - { - "name": "tests/unit/mock/yaml_helper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd95a4807e52e9123a8d40132a5f52b75cbc1496e1a32b104b2655bf631cfee4", - "format": 1 - }, - { - "name": "tests/unit/plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/become", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/become/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/become/conftest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "917507083eead1d34596d6b31a4a3600a780f477bc8856ef326c4b18a1dd2053", - "format": 1 - }, - { - "name": "tests/unit/plugins/become/helper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6da8cd096bd56d3f206b879eaaa93fd1732df17ba15d10a524549df46185dafc", - "format": 1 - }, - { - "name": "tests/unit/plugins/become/test_doas.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"2bdc1de37449ed84ce41b44565a575e8ee619a055ced31cf62d2c55a44b64f99", - "format": 1 - }, - { - "name": "tests/unit/plugins/become/test_dzdo.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f26500ca45cdedc6a217cdd18e0a1fdfeff72415c006cf78f0f4c25476b98ff7", - "format": 1 - }, - { - "name": "tests/unit/plugins/become/test_ksu.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b7b1b2f3a6e77846a3adab6f323ce7cbcdb0ce65fbc2d4bc8ae66f10e8a8a488", - "format": 1 - }, - { - "name": "tests/unit/plugins/become/test_pbrun.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f581f310504561f10a22a512343d2ae213e0d73eed950bd79fe35916f56589e", - "format": 1 - }, - { - "name": "tests/unit/plugins/become/test_pfexec.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6e7d4ccdbece51e1c3426e58225426cb3bfd5c6f475243f9dc9554a4a39f2509", - "format": 1 - }, - { - "name": "tests/unit/plugins/become/test_sudosu.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "268e56de2b8fcb36c79368ae1a72d408d845262fbceb7c6bc65844de24d30b50", - "format": 1 - }, - { - "name": "tests/unit/plugins/cache", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/cache/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/cache/test_memcached.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bcfc3a29068b2600ce1ba45b5b9d1ba0beff9e231b6ed491d17eb09f37eb56f3", - "format": 1 - }, - { - "name": "tests/unit/plugins/cache/test_redis.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6175849033bdb3dee3dcf16c0910361ded349f0cf7ca73f29e819d145864d020", - "format": 1 - }, - { - "name": "tests/unit/plugins/callback", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/callback/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/callback/test_elastic.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4ce3edd28ec0cf7c37fdcdc9e4a63c8d670bf4e66ede233df388e32c46f673cb", - "format": 1 - }, - { - "name": "tests/unit/plugins/callback/test_loganalytics.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "766e354e8049ff4e256d02d9f63baeb97b092bee6641bf8361e6c239f57dcd86", - "format": 1 - }, - { - "name": "tests/unit/plugins/callback/test_opentelemetry.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a176001eebc2a4bfbadb591ebfea86e2cf1fac7eb269e5e6a893b196c81cf3ac", - "format": 1 - }, - { - "name": "tests/unit/plugins/callback/test_splunk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "305ae4d1806d366ac83bb9d03418c5073287e973ddf481264a70fdb781a68623", - "format": 1 - }, - { - "name": "tests/unit/plugins/connection", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/connection/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/connection/test_lxc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"fe90b5d5eb17eab987cd0022995eb85b6c8f0e90d20aee7b8fc0d6945041ab00", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/fixtures", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/fixtures/lxd_inventory.atd", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "162213d31385d92e0c3c9eee2f02189646a384b14132e7a3952afc23ffeb33a4", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/test_cobbler.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97a48c825926b35f9c32303d7c88d0590a43935f2749d5de4f5b0b5ef721d444", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/test_icinga2.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ac2ffef2b6843298fe98692a3631a101e4179c59c64306be4d356d9f99b8bab0", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/test_linode.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfa9d219ee27513f7adc5e2e27b57e82e838f5fc473722202160456ba5e52aa2", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/test_lxd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5e1fc38581497412ecc4e9a4d6662995910edc71c7cdfc5c5f0e03b03ed9bd7e", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/test_opennebula.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "62664bab411df8055d9a52e9c2dc5033c23366ea857758603d513c995e4ea9b9", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/test_proxmox.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2ee244ed220f06691dc75a5b302be5461d124073fb4753eafa6ef9d7b40bc070", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/test_stackpath_compute.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "becd4388974334f46b58a19ae404eb50649fe9acba9f3bffed7e6cbf2767f97e", - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory/test_xen_orchestra.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "804e1802b18939d5b2ffe781a5e150b9b0b4c7ac70a61fd3364eb433aee16df0", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/test_dependent.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c5caedd0ff8644aa8c62c6b98c8ae0a66d905ee2f20666a047592f7b10171ab4", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/test_dsv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c25e7fc101df073b45b7eb7b048d7bcd8b3f477e6021c27dbe974945fd8299dd", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/test_etcd3.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d79104d0074682c5308648f66eabf50569ddc34f32ab916a02a4425077043083", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/test_lastpass.py", - "ftype": 
"file", - "chksum_type": "sha256", - "chksum_sha256": "53b61e83a357f3c9bd28795c8f16238a55f5cd9031783f06f00785d50e02dec8", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/test_manifold.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d5d2b651cf2fc37f4f8ff632aea0237ac7fb35ac5f03a643d7cffc5f1ed0db2c", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/test_onepassword.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97052edbff97b2b30d11ab8b147fe733bb382a02db51b4604bf0407a01fe2ef2", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/test_revbitspss.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "609ea39bd85c2575bdcb475c2a26ddc85e119bb02fb9f8f6d16a42d4e6aa853d", - "format": 1 - }, - { - "name": "tests/unit/plugins/lookup/test_tss.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7a725b4c2a4e0d32f67d3a3b19bde98df85146309e949842f423db5c52c9dc19", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/cloud", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/cloud/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/cloud/test_backoff.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c43c5418bed4f056752610c28cdc7b9ff535a1c29238996444a21fc6b47502c5", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/hwc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/hwc/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/hwc/test_dict_comparison.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c844c6b0656694a4828278a84f6ebe4d6850f022076d86aaf3b68c2fac685311", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/hwc/test_hwc_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0fb975491eb25d8f8e74373095a0cd87e12e1a7d6acd4282b1aa1101142f2b87", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/identity", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/identity/keycloak", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/identity/keycloak/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6e0678b4f3b5e1a84586ba094c97498c7fae33ef3dd404c736058e314b62b075", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/identity/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": 
"tests/unit/plugins/module_utils/net_tools", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/net_tools/pritunl", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/net_tools/pritunl/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "726f4d91b7a2f95d1bf59e5481c2a8e46ce79790a9d09c0c39afe2c562cb02eb", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/net_tools/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/remote_management", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/remote_management/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/fixtures", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "58e21893fa65459c9869fdbcc9c79299cc01183e3a10cf575cd75a62ff366e58", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "61a935bdae44191686b63826996abbf2431834febaa54e4a9e523aec016cdd61", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "728bad77904f8e3d2539810fd0dfcec6bb24621c78406daf4434dd611042da5e", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b994736a7e80d02c759c7b19977101c0c04ebc1c8460258f5b96f595b9daf037", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "92c62d837dcef25a817ac3a9903d6a430b0deb44848d29ab5ac5bb6eafcda526", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2a8de78de7ba45268294a48c99a82a957ecb3da299ac9036264308392b14106b", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8249860417aebbb656541d50db86e9b3314c58fd6608aa4cf87b29cb530e1cac", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"cfe4d1e5778e71f0beacda11226b934635d87a2194be94af275d09701f501e01", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/common.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5beba741c380832f920ce31d44c0061e44cd9301469262e080d83336713ac65c", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/conftest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cdc32c1f62b6cd60be98a4b5531ab3231d79055650df266371420afb052c1f93", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "19e5f84c0f0d0f1316516689819332972c3f21b6175f6e9748a5319b68e5a2ab", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "51388a0d4511aa3ab1ddf74b2ec0c603ed46e88741d90b8c202725d7c303b89d", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/test_misc.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4e7c63d3bbf78b71353572f6ee50a49f633a371b9506523cb5e9541df82837c9", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "34b0b4122166239da7c963580b38ee3beb3657815825b57c8c37349fafb55cb9", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5539b93046d0b10ed2823aa1d89efcc6969c154d00b4a92443157f6e4ed75313", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3dcd632f7e357295691f1dd4f1c5ef041bc76b28b3362ab91aa1a8b2be8bca08", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/test_xapi.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c32fee1f92b6049387e5af9a600b80e302cf08e3831a866af986d70a44700237", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2c7d8c81a73c9ab1c69a8e610c788a41f2817a56678475a1e36267cf8037b5a6", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/conftest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c50d5804d586f18df17d15e48332dc0c78239283d0becd6cd7eec8ed8dbd8032", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/test_csv.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "434a6147e2e3d20fb0c773fa83bcb76eeab520745158c79cbbdb021fca271b64", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/test_database.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dcf067c65ac448adaee89a093902592e7b79a3ae95b5cf47098cd729f7912727", - "format": 1 - }, - { - "name": 
"tests/unit/plugins/module_utils/test_known_hosts.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "856a89896c248a26157903f168873c8b575ac208c15d4b7071cbcae711ec51c9", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/test_module_helper.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7bee762074a200012e3f7613f9b9bcd778947aa8cff5c317a44dc32bcc2f9bdd", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/test_saslprep.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1bff0807c10f8d51746ee02e3e25654d07004f4b35cad01baacd6b24f3d342bb", - "format": 1 - }, - { - "name": "tests/unit/plugins/module_utils/test_utm_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ac7a58ed47d4ba383a871571bfbbb8e447e42019d555d3c5ccb5f31b79510a33", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/linode", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/linode/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/linode/conftest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4cb56f4daa028f7157bd3dd75b46162c565beb5120862c1585f890d51223209a", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/linode/test_linode.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c1b4d4e134d45209b594a04eda78afc8c7abcfd0a25b61a4c34137db0db6adf", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/linode/test_linode_v4.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "148d277d41d7dcec822fa651bc589a72362e520f156c201f979d84768237dc4f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/misc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/misc/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e2c49840bea31374cd2752508c1738eb38cdb3778b2b0a7bab83a3d87469d210", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/misc/test_proxmox_snap.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d841cbd18de08fe6679d1ad0a27dd7009e9de0892a938a3ac4d0d0f652086535", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/misc/test_proxmox_tasks_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "80d995f32b1d8b030952739951a4167f997d1a52bba989650db01dd0f47e8a32", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/misc/test_terraform.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9e60bfd9c9c35e83426606b13a75e8f6d2adcf1245ed1feae7c3030810061bac", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/scaleway", - "ftype": "dir", - "chksum_type": null, - 
"chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/scaleway/test_scaleway_private_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "312a23cd0fda058815138a619db9141693ebbec38f7402acbec607af12f431ad", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/xenserver", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/xenserver/FakeAnsibleModule.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8249860417aebbb656541d50db86e9b3314c58fd6608aa4cf87b29cb530e1cac", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/xenserver/FakeXenAPI.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cfe4d1e5778e71f0beacda11226b934635d87a2194be94af275d09701f501e01", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/xenserver/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/xenserver/common.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "958eafb650d09fa883cc9b3d9cf14d493723d91dcc1da9b9ee57c6dc70bdd57d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/xenserver/conftest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6cc2b211f0a74a9ec3994c584d26b423d3b9cc6671eeed0db7d8821865479d58", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f2599c3f551c4736f1e09629c7e2fcf44b6c8564022bb3dee6a8df2b728ba29f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_powerstate.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4b6ed5382c32dcf082bb72b27c567214b0da7943fd224f54a674d71142f7b26c", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/cloud/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/misc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/misc/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/misc/test_redis_data.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eee39ec82280d3f7022bb7ff3e1185a65fddcd61e2c12609aa23ace15cbc3f45", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/misc/test_redis_data_incr.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "96bd3dc851896186fee6461b2efacd33d31f941f44e31467023bf2eb92928973", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/misc/test_redis_data_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "23635e273c72e6ad03076970ba14374d1e6d8062637683493ea2fd3066607237", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/misc/test_redis_info.py", - "ftype": 
"file", - "chksum_type": "sha256", - "chksum_sha256": "aeac4234cfbb8535ebce9141aef81fa21dfc731b5ee925776b8cae12cceac44f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/saphana", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/saphana/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/saphana/test_hana_query.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2e810f7e83e330a5b02b00f1ab898d54e846e299cbbdd69d08109030e9d735ba", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/database/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/files/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/files/test_archive.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "109fbb9746b3e00b9acb4e3bcadfc89dbcf52d969ddd9227c481e4bedd75795e", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/files/test_sapcar_extract.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "20f63c642d96d97ff5ab7467ddaf2b921490455bb369a9fd771d6fc18131cf80", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/ipa", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/ipa/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/ipa/test_ipa_otpconfig.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9703a225774b826b0f178cf8781bfbbdab4cbf7ba6f37433f00ec4ad940fa2da", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/ipa/test_ipa_otptoken.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ba517f3a51db4c0497e2a0d0a91d02e4e7ad847d979c43befb2fa3f7374833a0", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/ipa/test_ipa_pwpolicy.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7ea0e6554df05d0e32e427eb2b3e0667140c02bdf82d3719da32f3df5001e402", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "73a85a14ac98adf5769bf68632ad478fdc67a74b4cf4c7fbe0116221b367714d", - 
"format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ada64a3590eb467544b696ef0713e039af89258ee406bc83ef7ea975bc7c4144", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "456032e70e0d94453cfc8a786e966b2f8e971a94e0b33a2ffda7c2cc8d21891d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_clientscope.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8feaa49ad0f2e92e628367565d073adffc3da26b90c2256456cfcb18d966842e", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1cae35ac5a12f439e22fd06fb4c182884d077d837111550b07e80dc45b83f5e8", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_realm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0e65ff0e2f5cdd49b8fddbf650cdd4de13bf022993f8dab53a23ec7d0c0494b7", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_realm_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8f7a24d3e87d2f95b5d0de1ddd935525ab773af36faf7e0cc22f32f50214e95c", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fad6787ad91ea7534129abc9bcabfb946f769b03241f7405640610d3488c4875", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_user_federation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "31d8a0b3e8230e4b75fb12566b200ef97bf33e832a4a682b7410063fc0aef6f1", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/identity/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/messaging", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/messaging/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/test_circonus_annotation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3f8d99c1af7336345ce7027f2e1d5055b156c46fa9aa98241c8bee6619bbffe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/test_datadog_downtime.py.disabled", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9ce888d2663874c4fedef718c0e555adfa305452e7e890757c68ee739f9e3609", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/test_icinga2_feature.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"784eaf2c4bb9687f5d501e243a92c970e6332682c10e223c0199f501d19ae05a", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/test_monit.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "89d4b9fc28b1836ed677ab50c2875b57c133da78fea0ccd33334aadb644ccd7a", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/test_pagerduty.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "51231e2644a421337619f184be06832ccffb8fdd9bc898989a18951df9a515c8", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/test_pagerduty_alert.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e86552e30a2074ae8275eb78b2e19376442b9b5c3d59e99b2b91119678875013", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/test_pagerduty_change.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c1f5661bedd93f7a7119c04fe3f15629fe77d21b27b3cb2c3f8af72fa2eb2f28", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/monitoring/test_statsd.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9510b952fa3eb04470bc26387967412e9357dcc7e833533fb0a4ebf7f65cec67", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/pritunl", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "080e132c436f7ab1194c69fcdffda7390c5b162f2f2e0aa7c010ab6862bba9bb", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "482ad29c2981c7443a1b5e7726c867ab8a39bd879549d2486ea5bad48b678643", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2c8fec7225e1d485276b68f36825da0f954862626196fd6c932291a32533e497", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "414b651d4ebba223926c1f6320f254a5ac79e21695fc3e16c27bdfb5a632b02f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/test_dnsimple.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "23f506451ff108d1362644d2957ded2b5fa826184f4aed897a95865b352c154e", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/test_dnsimple_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "193ae47e0e05f1c385711ddb6e6d3d2b138f712708af2b3748c4a6742e8e9f04", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/net_tools/test_nmcli.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a025f14b41fb7559dc1ed65721df3bf1d5e714a8af51caf564c8a21872c35ee4", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/notification", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/notification/__init__.py", - 
"ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/notification/test_campfire.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "818eda9c60aa41fce3ed8a7a88df3de57caa7c0c3267670295bd44ee96a0f8be", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/notification/test_discord.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4303a88177ab58c876cac262e1b012a6546d299e0b0d3cf54fc70da77ac4e4a7", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/notification/test_slack.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ea7a7424df543b70c068f8efb29a909a3935d8f7dced00bcec1e969c5fb334c0", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/language", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/language/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/language/test_cpanm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a74507fb1f23d0d00a38191215142359309f6e015136c12d1ddee26ee47d3096", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/language/test_gem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0484019fba974202d150ffafd84fe4ef416641854debf6505581c6eade9b7079", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/language/test_maven_artifact.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "179825e10a242f386aba87c4c8e1f5740653cd8962b07e37dbd588af37121a13", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/language/test_npm.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "26444c135b62fcb26cd7d857f32c93b49b88cad3575075719b1bb820669b0e09", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/conftest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "504f9b1ae9baf976f44e4ef62ed7c616129d090b3170571279e1bd8d43409be9", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_apk.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ad806b3c277e03d16fc7a2506353c46b8574e3cb82504362c6028c33fddc7de5", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_homebrew.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1a9a041047a61d0801c467295ebb1e238c530bc7555a1d70f4d59efc306f2549", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_homebrew_cask.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7548ab19616febd1347d9324d0d67723e6025617a012b25a485a7eed5c9fcfc3", - "format": 1 - }, - { - "name": 
"tests/unit/plugins/modules/packaging/os/test_macports.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ddd9585216de66bc18b9731523ae67ca2ba352070e58b7515b5b11b80422e2cb", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_pacman.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "24ec8cba0acaf52e7cff3de25e4604fc331ea805095959578b1e00169282fa68", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_pacman_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "29f0850e972b152e926a6b51cc3473b9c28353b04e59f0eee833d867ad687410", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_pkgin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f8ee6102605e34a4de8445118f42e2caccbc164da2672b27ccffe00c72a3f8b1", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "983aa5a904863d04b7a5e0af396c17e742131690ed55d7d26a80a8800b255cb2", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_rhn_channel.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3ee90c66745027bbaba686da547bf7d1645f975be32918dfb5e3b7afe69cd71", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_rhn_register.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5a6f919936578fb0758385bbbc0e4084930c36985cd808dd9bfb0edfd0503eb5", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_rhsm_release.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cf69e341046ff756177406f4964be17c84d38242336692e69c08da324b2955aa", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/os/test_rpm_ostree_pkg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13d047dfa2a3920faba147c2174bf872f4b5763d2902e860dffa7b1f551ee3bc", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/packaging/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/lenovoxcc", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44b040b74ed7312498b4fff23ebf329f3e1504f3a7558afd3082f4f76bf1bda5", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/lxca", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/lxca/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/lxca/test_lxca_cmms.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6dc89465a3f3f8a766411db1bddd2a0b7ff0d2b39bcf392b3c6a6d9707665e2f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/lxca/test_lxca_nodes.py", - "ftype": "file", - 
"chksum_type": "sha256", - "chksum_sha256": "b3d733b956abd26280a2322a9f11c04f52069df59c8c9bfe34af52325af97141", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/conftest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9df5d55d4865eec15d7799e2f889a1d539954f248564ca80aa3d38efb7fece3c", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/hpe_test_utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e0dc69496825ed8bf2f13d3dff2a27ba98685130a59fa971f1e6e0e9e83aff57", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/oneview_module_loader.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "02ae4523f1519d32f33f51f51420f9a48f75f32b55dbc6ee9ec3ead164e35ab5", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_datacenter_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "aba6e5371afc2bf09637d070139007bcbd45a9db409c6540e31e19ca21cd608d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_enclosure_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "da6d11c5cacef736152e2289c2621c0ae630f2bcd2de6791c67a432335c77e96", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d36247aa35c29fc75c354e4d0ab45cf689c45c559876b03b3c80a9c5f48ba761", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7e599e39df535672407b42860879a28ae6b83fa32cc4e927bff82ed17ce394ac", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "19f461d1441aeef057bd0b2fa9a5c0ca98cc973414accf93cf583bef0f7726a7", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5dd17dfd91d095476e740d32e543dcd51ed436d187fcb4e32e0c3411e4217fff", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9cf8e3857c596a63e055bcafed62b35d05f7f5a2f3a472839493dc3b9dae0222", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "53334346c654b1a2b755bb908aaad43a7691d44b537b550f0ca8f794281ee4b1", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "844029b96cc4dbb5449d89eb357f73e16c0233e557820635293dcb35e1884e63", - "format": 1 - }, - { - "name": 
"tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6409f79838322fbc92cc554b45398a6d5ee0b6d447ac3107d82806695e11aeb1", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9d98e413d2eb2b62cd4954127914838408793b8182dcf2916dfbe1c79efbffea", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3182e532dc20243b2bcee01228fd4430de5bf11971afe051650375ace450b46", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b9c21da4069dd0e9e9d22d07d3969c8b6e0fa638520d990e9c5e88859f626003", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "daab0258103b8bbc5fe2a0499857127177ded33f1bb8cd423795950537693138", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/remote_management/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/bitbucket", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/bitbucket/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_access_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "094ff4ee68f8cf526ba9f01191089c7691e1bc64dc8d90941b516f24297ad340", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_key_pair.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3a2a6ff5f09c328a20be078dcb07181464186ee07adb1b60506a11785046397b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_known_host.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97dcef1a318d02c39e09eba33d88bbd4fa80273d689a8ff8f3ddf3528e6c8857", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_variable.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "677afe076c6bc19d72d8389fbccdc92bcc46f5e34794e663b81c0d77ccc94c54", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/github", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/github/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/github/test_github_repo.py", - "ftype": "file", - "chksum_type": 
"sha256", - "chksum_sha256": "d796e9c56592e0cef63e5e152ed0a4ffe80a102d7385e6e7bd1d8fbd82935b7d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/gitlab.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "74bf2e627f1596b6ca846cde3af9f1abca586e8cfb9f4c72abf2014a7e41a6bb", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_deploy_key.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "679b2354ea81dd87e4c9302182ba4454f31f9a6e41176c9681ef7f55cdb4b946", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "67cc9724895cfcd3dfc98038a102824fa60cfabb0b1669491fba418c4ae17e63", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_hook.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "259547068279dcbb64a126a0fdf1df9984b18895fe52b69fe82dfd18fa5105c1", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_project.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "01081c5c1e073e383475bac5ebc69899745760c46dc178372a176c3e60c2f5d7", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1461b0b331c386dd5393b388c35cf1dceaf1c2707acc2db8416d179c793be1d7", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_runner.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "57adc6a8f1f625731859de384e7bd8ea006066a318f6db6a3f43c8cd1f85b7a7", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3aae06f2d12a7dff8b564f8ebd1f073f0acea01378108ac4ca045d721c6830b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/source_control/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/storage", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/storage/hpe3par", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/storage/hpe3par/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/storage/hpe3par/test_ss_3par_cpg.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "457fe72bb1be07d527f12757607fb8baa504fe99fedd3c5f193f2a961745a67d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/storage/pmem", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": 
"tests/unit/plugins/modules/storage/pmem/test_pmem.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b78afde8141f157a3ddafcceea1ac1919a20f67bb8ae165bf502916a577e1ec5", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/storage/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4f339f4a90e118d5b1b3b4e3fd59a3eb7460d743f3dfb1be34a9e0656f1e117", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up_twice", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ed67f65186f6ec2c5e67868d7d786e229cba4b67dc9848803e3f6bb844bfedd", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up_twice.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { 
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "732c90c6031d571c37e260b91d453408a7eb6a9b7bcef6ab5dcad7153dd653a0", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_aggi_remove_dup", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c82db6ff2a9ce205ec496728645aac7a08c6c699746cd8f981e60c8728062285", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_aggi_remove_dup.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6653eeb79ba4c679c224c9c43b4a0bde5075c9795cd5f446d95560c883df1c67", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b4dccfc3f80598ea3f2f32f6661b3b5fa6997e6d6a0e9e2f3cc4648505ec7f52", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "53abda66ee9a035f805bb82fc7cfae6e0b17f42663158bd6c7af5fa2b90aea88", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", 
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "134f64892e64e650bffb29c928ec0ab72e397c122f178417e99cb56fab5c3b2b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "acfcc70084877cfb0c89871e02d24ec9711b22085f5f5fbe4ca8a69cf0336dcf", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "08747ecd380971329c1bfe12df432f00c64dbbcf346f4c14ec799dfba42b2b1f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b20955f532135df51de900c83c10a6dd087cd30d0df0bfc238a5a7e057492778", - "format": 1 - }, - { - "name": 
"tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4eaf8c9708b20dab8fc90b8b2b5716167e2bc92c1c1b0638ca82e11323f78199", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc4f1ab45fe950aaa0dd6e61e3eb13423b0e1d98202a2f2b15cf78458eff5c48", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4fac1d8f53319085621e778b7012c376068ede405dd18f2a8a1a06a5f378b00a", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4f339f4a90e118d5b1b3b4e3fd59a3eb7460d743f3dfb1be34a9e0656f1e117", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up_twice", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ed67f65186f6ec2c5e67868d7d786e229cba4b67dc9848803e3f6bb844bfedd", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up_twice.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "732c90c6031d571c37e260b91d453408a7eb6a9b7bcef6ab5dcad7153dd653a0", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_aggi_remove_dup", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c82db6ff2a9ce205ec496728645aac7a08c6c699746cd8f981e60c8728062285", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_aggi_remove_dup.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1fda07e9a6f93949f6f53ba8b71054114024b9d1d612c4455b1ca5effe630e5e", - "format": 1 - }, - { - "name": 
"tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "529e2e7b36f6ec834edb09878ead526156aa9d5349a5cedc1194796d30c7b7e4", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9db4000a5df22bf6923e3c3fae4171698ec097639c4e94297297af729fc0dbe7", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0287f7b3a5351740d00394005ce8f49ae1a13484eaafb4c41776acf4e56c706d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"96446e58502f5e9c1e50b4b3b64d639eed3afa6958e3aa5531225a24b8a94063", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b05dd67b937088e5673b7fb10978bfc40f35eb4d5f5f5682936a9c1a5053db99", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b20955f532135df51de900c83c10a6dd087cd30d0df0bfc238a5a7e057492778", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "70b95830220d518dae6662f2e1ca836dd9c8adc1823351048cc53db8c865c33a", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dc4f1ab45fe950aaa0dd6e61e3eb13423b0e1d98202a2f2b15cf78458eff5c48", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json", - "ftype": 
"file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4fac1d8f53319085621e778b7012c376068ede405dd18f2a8a1a06a5f378b00a", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up_twice", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up_twice.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up_twice.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": 
"tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_aggi_remove_dup", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_aggi_remove_dup.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_aggi_remove_dup.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8ac948a5ada90b50ea34d1e31ed4657f220a7153ee2908b880f3dbcf4b1b417a", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "93e09e5b99049be103115e7ede6022cfd51cff8543cfc4f2552f5315e9e7ea75", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"a5bcf21eb70f131e027c0a1236d2264b0db9de60c2d8ac9df860b83839e7a757", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0287f7b3a5351740d00394005ce8f49ae1a13484eaafb4c41776acf4e56c706d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "96446e58502f5e9c1e50b4b3b64d639eed3afa6958e3aa5531225a24b8a94063", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b05dd67b937088e5673b7fb10978bfc40f35eb4d5f5f5682936a9c1a5053db99", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "eb32a11d2175d165ac30d4d96265aa7890de42aad1e4c03fe862db31a9b609f6", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json", - "ftype": "file", 
- "chksum_type": "sha256", - "chksum_sha256": "7df04865747cdaf41c044674909f7f9d789de4c721aab7638549d28106f4eb7e", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "806e3459fe842c37406690b7ea1c112832ac485e8e10876495c671241ae7ab29", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "07945be2848b249d636ea429313c539ea4c9f921780e1d912b6472561821143c", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ac877a74278c9ed870b0358447d9c05e8dc910d4b3594bf04c63699d16d8f688", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4d65afd09be4ed2e70dadbbcc3691e8170b1e819256795dfcffb128a41a880d3", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup.test_no_changes", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup.test_no_changes.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up.exceptions.txt", 
- "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up_twice", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up_twice.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up_twice.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_and_delete_aggi_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "691c3c19b3d8ad7ab347c24c006da07ed165f4f6161216dfb90da0f2ac922768", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_aggi_remove_dup", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f98b88779a53ef7a5c4b2dbcdf2229493bb1b9eff316d9b0fab32e2bf45ca774", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_aggi_remove_dup.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_aggi_remove_dup.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8ac948a5ada90b50ea34d1e31ed4657f220a7153ee2908b880f3dbcf4b1b417a", - "format": 1 - }, - { - "name": 
"tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_post_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "93e09e5b99049be103115e7ede6022cfd51cff8543cfc4f2552f5315e9e7ea75", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_post_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_pre_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a5bcf21eb70f131e027c0a1236d2264b0db9de60c2d8ac9df860b83839e7a757", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0287f7b3a5351740d00394005ce8f49ae1a13484eaafb4c41776acf4e56c706d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_post_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "96446e58502f5e9c1e50b4b3b64d639eed3afa6958e3aa5531225a24b8a94063", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_post_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_pre_up", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b05dd67b937088e5673b7fb10978bfc40f35eb4d5f5f5682936a9c1a5053db99", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_method", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_method.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b20955f532135df51de900c83c10a6dd087cd30d0df0bfc238a5a7e057492778", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_method.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_revert", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_revert.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "806e3459fe842c37406690b7ea1c112832ac485e8e10876495c671241ae7ab29", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_revert.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "16edb798abcd4c903a6812211f3b9f3ee149161f86a0036af50ce1df0f7b224a", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ac877a74278c9ed870b0358447d9c05e8dc910d4b3594bf04c63699d16d8f688", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_slaves", - "ftype": "file", - "chksum_type": 
"sha256", - "chksum_sha256": "3b4f53dddf4a96187f7318bbc97ed3774b0f66b870a3e1cc0dfc2862832fa516", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_slaves.exceptions.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_slaves.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/address_family", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/default_dhcp", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/servers.com", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/up_down_dup", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/README.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1df465805a0f52344c2272a514db0c9d9b2187e851c0cf58b985b012aeb29289", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "95a94bc4c73e8018ff73d9930ebfa34d2bc441319619a63adcab35b1393cec18", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_java_keystore.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d3c966aeb2a8cd93f2fe6b205edda80f08022f9d39b3e03afb7b32c67b02d90e", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_modprobe.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b3436221c926fc320aac044be960dcc9804cff9273c7c5469c8f301466400b4d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_pamd.py", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "c3b765fdcdbfdae2c24f92e5b3b35e9f1247f3f3456d1d24a075941ea7eceb95", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_parted.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d9867bddb7cc565543bdfc4424b582ae22257f23d7f4b912b19c6eac25c2aa59", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_sap_task_list_execute.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e16d645d6fde15d7d5dce315d4c9ee7c9bc124d77bf1646d4c25e96d7e9015fb", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_solaris_zone.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "97894d2e554664a5b1ae9654cf55276e8d8ea302c12b1a52537c1315906f604f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_sysupgrade.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d64272139629294cb611a70cc5bc8ab03efeef5a8c20f306f0bf109ce5f57d32", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_ufw.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "df3d10c68232b533ce8a18ce408ebb8b8a5b7e5bf5bbdbe0c5d6a800ed6cbdc3", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_xfconf.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9791a52ec609c94ad8c194e00d4febb881024d2757d87b288ad61aa6682faa95", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/system/test_xfconf_info.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c9bca59b7f036aef685fee67c7eb5be3661c63798ba1149ad4ffb84f1c3e3c90", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/web_infrastructure", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/web_infrastructure/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/web_infrastructure/test_apache2_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cd9ebad3842220ea8251164d45b9cb1d85197ef69cd1e76f80621bf396960d8f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6b8417ebd37e3c8e8b020bee5c2fc85c2f4eddedf7035623f7de2d9c25c6879d", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0e11908d05982be48f663298dfa98903502893e1301e0b4edb970e594306998c", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/conftest.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b1465f0ed10cc15a6d7578fc527055c28729c9fa9d94856eafc4aada3b3f42a6", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/utils.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3ebeccd641cf5de2b25ae1bf4e153492bb499c641c5b5465ca25168f08b1a1ac", - "format": 1 - }, - { - "name": "tests/unit/plugins/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - 
"format": 1 - }, - { - "name": "tests/unit/__init__.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/unit/requirements.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0afbc983609066c6ad6d21722430516d6f7b1c1751f6d57606195c0ca500f0f0", - "format": 1 - }, - { - "name": "tests/utils", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/utils/shippable", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/utils/shippable/aix.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4", - "format": 1 - }, - { - "name": "tests/utils/shippable/freebsd.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4", - "format": 1 - }, - { - "name": "tests/utils/shippable/macos.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4", - "format": 1 - }, - { - "name": "tests/utils/shippable/osx.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4", - "format": 1 - }, - { - "name": "tests/utils/shippable/rhel.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4", - "format": 1 - }, - { - "name": "tests/utils/shippable/cloud.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dd953f7e779b9962e76492c389142e03174e84a8115f53e56628e2af9e66b818", - "format": 1 - }, - { - "name": "tests/utils/shippable/linux-community.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4a48ce3df89f871db4f26f1a1ae16a362ff8219be874fd150037866f7e0fb64d", - "format": 1 - }, - { - "name": "tests/utils/shippable/linux.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "07aa5e07a0b732a671bf9fdadfe073dd310b81857b897328ce2fa829e2c76315", - "format": 1 - }, - { - "name": "tests/utils/shippable/remote.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4", - "format": 1 - }, - { - "name": "tests/utils/shippable/sanity.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6920f7ea186e75399d34231105c81a523ea5ff938d11e1f425d4fc7bf5d013b8", - "format": 1 - }, - { - "name": "tests/utils/shippable/shippable.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "346be0b4487b58d1848ffa2ac6c9ce3b2fb1c5b719b80a79542f790124b975b5", - "format": 1 - }, - { - "name": "tests/utils/shippable/units.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a1375f7024d5e574f8daabab3b3f3a0aeb72b2abc6b65854e150b0479fb19a84", - "format": 1 - }, - { - "name": "tests/utils/constraints.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e37959909060dc6d51fbcf125a021df0889954e7cd3b2f5721a88709a1dcee78", - "format": 1 - }, - { - "name": "tests/.gitignore", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600", - "format": 1 - }, - { - "name": "tests/config.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": 
"79299e4b233d86b7d878b2b35b6548347c28fd71a1166078a9958e6d8e6749c7", - "format": 1 - }, - { - "name": "tests/requirements.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3b49e42f53135c80834a1472578c15823c22181988ebf3da36c28389c690d9f7", - "format": 1 - }, - { - "name": ".gitignore", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a3ff9f861480bfc0c17c530909c12e4cce972529d64f9d1abd8d8a8ac0a54c97", - "format": 1 - }, - { - "name": "CHANGELOG.rst", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "da7e93a8de28ead878a2eb528a5a50e6b3718b871c7dc958154ba66088e0d05f", - "format": 1 - }, - { - "name": "CONTRIBUTING.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1989d42706447d097ab9288cc3f5180ead69d96b4f86b74bb6eb8c1252aa947c", - "format": 1 - }, - { - "name": "COPYING", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227", - "format": 1 - }, - { - "name": "README.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a5aedf8ce17c36b016fccdd4966f57aea7028c0b0620065b79024e9eb56c49ca", - "format": 1 - }, - { - "name": "commit-rights.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5e2b9e5fde379299fff928bd16b21f9d8bd83744d228f8dc719f7c478080ac1e", - "format": 1 - } - ], - "format": 1 -} \ No newline at end of file diff --git a/ansible_collections/community/general/MANIFEST.json b/ansible_collections/community/general/MANIFEST.json deleted file mode 100644 index 4742a4f8..00000000 --- a/ansible_collections/community/general/MANIFEST.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "collection_info": { - "namespace": "community", - "name": "general", - "version": "4.6.1", - "authors": [ - "Ansible (https://github.com/ansible)" - ], - "readme": "README.md", - "tags": [ - "community" - ], - "description": null, - "license": [], - "license_file": "COPYING", - "dependencies": {}, - "repository": "https://github.com/ansible-collections/community.general", - "documentation": "https://docs.ansible.com/ansible/latest/collections/community/general/", - "homepage": "https://github.com/ansible-collections/community.general", - "issues": "https://github.com/ansible-collections/community.general/issues" - }, - "file_manifest_file": { - "name": "FILES.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb0591f9fb0d5cf7f64a85aa53cbfc5e62b3ea8c4233de5bdfb1f1230c884ea0", - "format": 1 - }, - "format": 1 -} \ No newline at end of file diff --git a/ansible_collections/community/general/README.md b/ansible_collections/community/general/README.md deleted file mode 100644 index d417466a..00000000 --- a/ansible_collections/community/general/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Community General Collection - -[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-4)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) -[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) - -This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. 
- -You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). - -Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so. - -## Code of Conduct - -We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project. - -If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. - -## Tested with Ansible - -Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported. - -## External requirements - -Some modules and plugins require external libraries. Please check the requirements for each plugin or module you use in the documentation to find out which requirements are needed. - -## Included content - -Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). - -## Using this collection - -This collection is shipped with the Ansible package. So if you have it installed, no more action is required. - -If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool: - - ansible-galaxy collection install community.general - -You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format: - -```yaml -collections: -- name: community.general -``` - -Note that if you install the collection manually, it will not be upgraded automatically when you upgrade the Ansible package. To upgrade the collection to the latest available version, run the following command: - -```bash -ansible-galaxy collection install community.general --upgrade -``` - -You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general): - -```bash -ansible-galaxy collection install community.general:==X.Y.Z -``` - -See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. - -## Contributing to this collection - -The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software. - -We are actively accepting new contributors. - -All types of contributions are very welcome. - -You don't know how to start? 
Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md)! - -The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals. - -You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). - -Also, for some notes specific to this collection, see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md). - -### Running tests - -See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections). - -## Collection maintenance - -To learn how to maintain / become a maintainer of this collection, refer to: - -* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md). -* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst). - -It is necessary for maintainers of this collection to be subscribed to: - -* The collection itself (the `Watch` button → `All Activity` in the upper right corner of the repository's homepage). -* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45). - -They should also be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). - -## Communication - -We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed. - -Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat). - -We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us. - -For more information about communities, meetings and agendas, see [Community Wiki](https://github.com/ansible/community/wiki/Community). - -For more information about communication, refer to Ansible's [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). - -## Publishing New Version - -See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection. - -## Release notes - -See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-4/CHANGELOG.rst). - -## Roadmap - -In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes. - -See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning, and deprecation.
- -## More information - -- [Ansible Collection overview](https://github.com/ansible-collections/overview) -- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html) -- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html) -- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) - -## Licensing - -GNU General Public License v3.0 or later. - -See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text. diff --git a/ansible_collections/community/general/changelogs/.gitignore b/ansible_collections/community/general/changelogs/.gitignore deleted file mode 100644 index 6be6b533..00000000 --- a/ansible_collections/community/general/changelogs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/.plugin-cache.yaml diff --git a/ansible_collections/community/general/changelogs/changelog.yaml b/ansible_collections/community/general/changelogs/changelog.yaml deleted file mode 100644 index 2a42f45c..00000000 --- a/ansible_collections/community/general/changelogs/changelog.yaml +++ /dev/null @@ -1,1605 +0,0 @@ -ancestor: 3.0.0 -releases: - 4.0.0: - changes: - breaking_changes: - - archive - adding idempotency checks for changes to file names and content - within the ``destination`` file (https://github.com/ansible-collections/community.general/pull/3075). - - lxd inventory plugin - when used with Python 2, the plugin now needs ``ipaddress`` - installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441). - - scaleway_security_group_rule - when used with Python 2, the module now needs - ``ipaddress`` installed `from pypi `_ - (https://github.com/ansible-collections/community.general/pull/2441). - bugfixes: - - _mount module utils - fixed the sanity checks (https://github.com/ansible-collections/community.general/pull/2883). - - ali_instance_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - ansible_galaxy_install - the output value ``cmd_args`` was bringing the intermediate - command used to gather the state, instead of the command that actually performed - the state change (https://github.com/ansible-collections/community.general/pull/3655). - - apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message - when not found (https://github.com/ansible-collections/community.general/issues/3253). - - archive - fixed ``exclude_path`` values causing incorrect archive root (https://github.com/ansible-collections/community.general/pull/2816). - - archive - fixed improper file names for single file zip archives (https://github.com/ansible-collections/community.general/issues/2818). - - archive - fixed incorrect ``state`` result value documentation (https://github.com/ansible-collections/community.general/pull/2816). - - archive - fixed task failure when using the ``remove`` option with a ``path`` - containing nested files for ``format``s other than ``zip`` (https://github.com/ansible-collections/community.general/issues/2919). - - archive - fixing archive root determination when longest common root is ``/`` - (https://github.com/ansible-collections/community.general/pull/3036). - - composer - use ``no-interaction`` option when discovering available options - to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348). 
- - consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495). - - consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter`` - and ``token`` as keyword arguments (https://github.com/ansible-collections/community.general/issues/2124). - - copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-`` - (for example ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084, - https://github.com/ansible-collections/community.general/pull/3237). - - cpanm - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). - - deploy_helper - improved parameter checking by using standard Ansible construct - (https://github.com/ansible-collections/community.general/pull/3104). - - django_manage - argument ``command`` is being split again as it should - (https://github.com/ansible-collections/community.general/issues/3215). - - django_manage - parameters ``apps`` and ``fixtures`` are now split instead - of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333). - - django_manage - refactor to call ``run_command()`` passing command as a list - instead of string (https://github.com/ansible-collections/community.general/pull/3098). - - ejabberd_user - replaced in-code check with ``required_if``, using ``get_bin_path()`` - for the command, passing args to ``run_command()`` as list instead of string - (https://github.com/ansible-collections/community.general/pull/3093). - - filesystem - repair ``reiserfs`` fstype support after adding it to integration - tests (https://github.com/ansible-collections/community.general/pull/2472). - - gitlab_deploy_key - fix idempotency on projects with multiple deploy keys - (https://github.com/ansible-collections/community.general/pull/3473). - - gitlab_deploy_key - fix the SSH Deploy Key being deleted accidentally while - running task in check mode (https://github.com/ansible-collections/community.general/issues/3621, - https://github.com/ansible-collections/community.general/pull/3622). - - gitlab_group - avoid passing wrong value for ``require_two_factor_authentication`` - on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453). - - gitlab_group_members - ``get_group_id`` returns the group ID by matching ``full_path``, - ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400). - - gitlab_group_members - fixes issue when gitlab group has more than 20 members, - pagination problem (https://github.com/ansible-collections/community.general/issues/3041). - - gitlab_project - user projects are created using namespace ID now, instead - of user ID (https://github.com/ansible-collections/community.general/pull/2881). - - gitlab_project_members - ``get_project_id`` returns the project ID by matching - ``full_path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3602). - - gitlab_project_members - fixes issue when gitlab group has more than 20 members, - pagination problem (https://github.com/ansible-collections/community.general/issues/3041). - - idrac_redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing - (https://github.com/ansible-collections/community.general/pull/2385). - - influxdb_user - allow creation of admin users when InfluxDB authentication - is enabled but no other user exists on the database. In this scenario, InfluxDB - 1.x allows only ``CREATE USER`` queries and rejects any other query (https://github.com/ansible-collections/community.general/issues/2364). - - influxdb_user - fix bug where an influxdb user has no privileges for 2 or - more databases (https://github.com/ansible-collections/community.general/pull/2499). - - influxdb_user - fix bug which removed current privileges instead of appending - them to existing ones (https://github.com/ansible-collections/community.general/issues/2609, - https://github.com/ansible-collections/community.general/pull/2614). - - ini_file - fix Unicode processing for Python 2 (https://github.com/ansible-collections/community.general/pull/2875). - - ini_file - fix inconsistency between empty value and no value (https://github.com/ansible-collections/community.general/issues/3031). - - interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328). - - inventory and vault scripts - change file permissions to make vendored inventory - and vault scripts executable (https://github.com/ansible-collections/community.general/pull/2337). - - ipa_* modules - fix environment fallback for ``ipa_host`` option (https://github.com/ansible-collections/community.general/issues/3560). - - ipa_sudorule - call ``sudorule_add_allow_command`` method instead of ``sudorule_add_allow_command_group`` - (https://github.com/ansible-collections/community.general/issues/2442). - - iptables_state - call ``async_status`` action plugin rather than its module - (https://github.com/ansible-collections/community.general/issues/2700). - - iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean - up (https://github.com/ansible-collections/community.general/pull/2525). - - iptables_state - fix a broken query of ``async_status`` result with current - ansible-core development version (https://github.com/ansible-collections/community.general/issues/2627, - https://github.com/ansible-collections/community.general/pull/2671). - - iptables_state - fix initialization of iptables from null state when addressing - more than one table (https://github.com/ansible-collections/community.general/issues/2523). - - java_cert - fix issue with incorrect alias used on PKCS#12 certificate import - (https://github.com/ansible-collections/community.general/pull/2560). - - java_cert - import private key as well as public certificate from PKCS#12 - (https://github.com/ansible-collections/community.general/issues/2460). - - java_keystore - add parameter ``keystore_type`` to control output file format - and override ``keytool``'s default, which depends on Java version (https://github.com/ansible-collections/community.general/issues/2515). - - jboss - fix the deployment file permission issue when the JBoss server is running - under a non-root user. The deployment file is copied with file content only. - The file permission is set to ``440`` and belongs to the root user. When the JBoss - ``WildFly`` server is running under a non-root user, it is unable to read the - deployment file (https://github.com/ansible-collections/community.general/pull/3426).
- - jenkins_build - examine presence of ``build_number`` before deleting a jenkins - build (https://github.com/ansible-collections/community.general/pull/2850). - - jenkins_plugin - use POST method for sending request to jenkins API when ``state`` - option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent`` - (https://github.com/ansible-collections/community.general/issues/2510). - - json_query filter plugin - avoid 'unknown type' errors for more Ansible internal - types (https://github.com/ansible-collections/community.general/pull/2607). - - keycloak_authentication - fix bug when two identical executions are in the - same authentication flow (https://github.com/ansible-collections/community.general/pull/2904). - - keycloak_authentication - fix bug, the requirement was always on ``DISABLED`` - when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330). - - keycloak_client - update the check mode to not show differences resulting - from sorting and default values relating to the properties, ``redirectUris``, - ``attributes``, and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/3610). - - keycloak_identity_provider - fix change detection when updating identity provider - mappers (https://github.com/ansible-collections/community.general/pull/3538, - https://github.com/ansible-collections/community.general/issues/3537). - - keycloak_realm - ``ssl_required`` changed from a boolean type to accept the - strings ``none``, ``external`` or ``all``. This is not a breaking change since - the module always failed when a boolean was supplied (https://github.com/ansible-collections/community.general/pull/2693). - - keycloak_realm - element type for ``events_listeners`` parameter should be - ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231). - - keycloak_realm - remove warning that ``reset_password_allowed`` needs to be - marked as ``no_log`` (https://github.com/ansible-collections/community.general/pull/2694). - - keycloak_role - quote role name when used in URL path to avoid errors when - role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535, - https://github.com/ansible-collections/community.general/pull/3536). - - launchd - fixed sanity check in the module's code (https://github.com/ansible-collections/community.general/pull/2960). - - launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337). - - linode_v4 - changed the error message to point to the correct bugtracker URL - (https://github.com/ansible-collections/community.general/pull/2430). - - logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to - fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692). - - lvol - fixed rounding errors (https://github.com/ansible-collections/community.general/issues/2370). - - lvol - fixed size unit capitalization to match units used between different - tools for comparison (https://github.com/ansible-collections/community.general/issues/2360). 
- - lvol - honor ``check_mode`` on thinpool (https://github.com/ansible-collections/community.general/issues/2934). - - macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499). - - maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - memcached cache plugin - change function argument names to fix sanity errors - (https://github.com/ansible-collections/community.general/pull/3194). - - memset_memstore_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - memset_server_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - modprobe - added additional checks to ensure module load/unload is effective - (https://github.com/ansible-collections/community.general/issues/1608). - - module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce - locale choice (https://github.com/ansible-collections/community.general/pull/2731). - - module_helper module utils - avoid failing when non-zero ``rc`` is present - on regular exit (https://github.com/ansible-collections/community.general/pull/2912). - - module_helper module utils - fixed change-tracking for dictionaries and lists - (https://github.com/ansible-collections/community.general/pull/2951). - - netapp module utils - remove always-true conditional to fix sanity errors - (https://github.com/ansible-collections/community.general/pull/3194). - - netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception - handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590). - - nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512). - - nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels - (https://github.com/ansible-collections/community.general/issues/3238, https://github.com/ansible-collections/community.general/pull/3239). - - nmcli - compare MAC addresses case insensitively to fix idempotency issue - (https://github.com/ansible-collections/community.general/issues/2409). - - nmcli - fixed ``dns6`` option handling so that it is treated as a list internally - (https://github.com/ansible-collections/community.general/pull/3563). - - nmcli - fixed ``ipv4.route-metric`` being in properties of type list (https://github.com/ansible-collections/community.general/pull/3563). - - nmcli - fixes team-slave configuration by adding connection.slave-type (https://github.com/ansible-collections/community.general/issues/766). - - nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli`` - command (https://github.com/ansible-collections/community.general/issues/2408). - - npm - correctly handle cases where a dependency does not have a ``version`` - property because it is either missing or invalid (https://github.com/ansible-collections/community.general/issues/2917). - - npm - when the ``version`` option is used the comparison of installed vs missing - will use name@version instead of just name, allowing version specific updates - (https://github.com/ansible-collections/community.general/issues/2021). - - one_image - fix error message when renaming an image (https://github.com/ansible-collections/community.general/pull/3626). 
- - one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435). - - oneview_datacenter_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_enclosure_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_ethernet_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_fc_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_fcoe_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_logical_interconnect_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_network_set_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - oneview_san_manager_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - open_iscsi - calling ``run_command`` with arguments as ``list`` instead of - ``str`` (https://github.com/ansible-collections/community.general/pull/3286). - - openbsd_pkg - fix crash from ``KeyError`` exception when package installs, - but ``pkg_add`` returns with a non-zero exit code (https://github.com/ansible-collections/community.general/pull/3336). - - openbsd_pkg - fix regexp matching crash. This bug could trigger on package - names with special characters, for example ``g++`` (https://github.com/ansible-collections/community.general/pull/3161). - - opentelemetry callback plugin - validated the task result exception without - crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450, - https://github.com/ansible/ansible/issues/75726). - - openwrt_init - calling ``run_command`` with arguments as ``list`` instead - of ``str`` (https://github.com/ansible-collections/community.general/pull/3284). - - ovirt4 inventory script - improve configparser creation to avoid crashes for - options without values (https://github.com/ansible-collections/community.general/issues/674). - - packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - pacman - fix changed status when ignorepkg has been defined (https://github.com/ansible-collections/community.general/issues/1758). - - pamd - code for ``state=updated`` when dealing with the pam module arguments, - made no distinction between ``None`` and an empty list (https://github.com/ansible-collections/community.general/issues/3260). - - pamd - fixed problem with files containing only one or two lines (https://github.com/ansible-collections/community.general/issues/2925).
- - pids - avoid crashes for older ``psutil`` versions, like on RHEL6 and RHEL7 - (https://github.com/ansible-collections/community.general/pull/2808). - - pipx - ``state=inject`` was failing to parse the list of injected packages - (https://github.com/ansible-collections/community.general/pull/3611). - - pipx - set environment variable ``USE_EMOJI=0`` to prevent errors in platforms - that do not support ``UTF-8`` (https://github.com/ansible-collections/community.general/pull/3611). - - pipx - the output value ``cmd_args`` was bringing the intermediate command - used to gather the state, instead of the command that actually performed the - state change (https://github.com/ansible-collections/community.general/pull/3655). - - pkgin - Fix exception encountered when all packages are already installed - (https://github.com/ansible-collections/community.general/pull/3583). - - pkgng - ``name=* state=latest`` check for upgrades did not count "Number of - packages to be reinstalled" as a `changed` action, giving incorrect results - in both regular and check mode (https://github.com/ansible-collections/community.general/pull/3526). - - pkgng - an `earlier PR `_ - broke check mode so that the module always reports `not changed`. This is - now fixed so that the module reports the number of upgrade or install actions - that would be performed (https://github.com/ansible-collections/community.general/pull/3526). - - pkgng - the ``annotation`` functionality was broken and is now fixed, and - now also works with check mode (https://github.com/ansible-collections/community.general/pull/3526). - - proxmox inventory plugin - fixed parsing failures when some cluster nodes - are offline (https://github.com/ansible-collections/community.general/issues/2931). - - proxmox inventory plugin - fixed plugin failure when a ``qemu`` guest has - no ``template`` key (https://github.com/ansible-collections/community.general/pull/3052). - - proxmox_group_info - fix module crash if a ``group`` parameter is used (https://github.com/ansible-collections/community.general/pull/3649). - - proxmox_kvm - clone operation should return the VMID of the target VM and - not that of the source VM. This was failing when the target VM with the chosen - name already existed (https://github.com/ansible-collections/community.general/pull/3266). - - proxmox_kvm - fix parsing of Proxmox VM information with device info not containing - a comma, like disks backed by ZFS zvols (https://github.com/ansible-collections/community.general/issues/2840). - - proxmox_kvm - fix result of clone, now returns ``newid`` instead of ``vmid`` - (https://github.com/ansible-collections/community.general/pull/3034). - - proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists - (https://github.com/ansible-collections/community.general/issues/2648). - - puppet - replace ``console`` with ``stdout`` in ``logdest`` option when ``all`` - has been chosen (https://github.com/ansible-collections/community.general/issues/1190). - - rax_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - redfish_command - fix extraneous error caused by missing ``bootdevice`` argument - when using the ``DisableBootOverride`` sub-command (https://github.com/ansible-collections/community.general/issues/3005). - - redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - redfish_utils module utils - do not attempt to change the boot source override - mode if not specified by the user (https://github.com/ansible-collections/community.general/issues/3509/). - - redfish_utils module utils - if a manager network property is not specified - in the service, attempt to change the requested settings (https://github.com/ansible-collections/community.general/issues/3404/). - - redfish_utils module utils - if given, add account ID of user that should - be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/). - - redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497). - - rhsm_release - fix the issue that module considers 8, 7Client and 7Workstation - as invalid releases (https://github.com/ansible-collections/community.general/pull/2571). - - saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194). - - scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - scaleway plugin inventory - fix ``JSON object must be str, not 'bytes'`` with - Python 3.5 (https://github.com/ansible-collections/community.general/issues/2769). - - smartos_image_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - snap - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). - - snap - fix formatting of ``--channel`` argument when the ``channel`` option - is used (https://github.com/ansible-collections/community.general/pull/3028). - - snap - fix various bugs which prevented the module from working at all, and - which resulted in ``state=absent`` fail on absent snaps (https://github.com/ansible-collections/community.general/issues/2835, - https://github.com/ansible-collections/community.general/issues/2906, https://github.com/ansible-collections/community.general/pull/2912). - - snap - fixed the order of the ``--classic`` parameter in the command line - invocation (https://github.com/ansible-collections/community.general/issues/2916). - - snap_alias - the output value ``cmd_args`` was bringing the intermediate command - used to gather the state, instead of the command that actually performed the - state change (https://github.com/ansible-collections/community.general/pull/3655). - - snmp_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/). - - stacki_host - when adding a new server, ``rack`` and ``rank`` must be passed, - and network parameters are optional (https://github.com/ansible-collections/community.general/pull/2681). - - stackpath_compute inventory script - fix broken validation checks for client - ID and client secret (https://github.com/ansible-collections/community.general/pull/2448). - - supervisorctl - state ``signalled`` was not working (https://github.com/ansible-collections/community.general/pull/3068). - - svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with - Python 3 (https://github.com/ansible-collections/community.general/issues/2373). - - taiga - some constructs in the module fixed to work also in Python 3 (https://github.com/ansible-collections/community.general/pull/3067). 
- - terraform - ensure the workspace is set back to its previous value when the - apply fails (https://github.com/ansible-collections/community.general/pull/2634). - - tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk`` - version <=0.0.5 (https://github.com/ansible-collections/community.general/issues/3192, - https://github.com/ansible-collections/community.general/pull/3199). - - tss lookup plugin - fixed incompatibility with ``python-tss-sdk`` version - 1.0.0 (https://github.com/ansible-collections/community.general/issues/3057, - https://github.com/ansible-collections/community.general/pull/3139). - - udm_dns_record - fixed managing of PTR records, which can never have worked - before (https://github.com/ansible-collections/community.general/pull/3256). - - ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194). - - utm_aaa_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_ca_host_key_cert_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_network_interface_address_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_proxy_frontend_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - utm_proxy_location_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - vdo - boolean arguments now compared with proper ``true`` and ``false`` values - instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191). - - xenserver_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715). - - xfconf_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). - - yaml callback plugin - avoid modifying PyYAML so that other plugins using - it on the controller, like the ``to_yaml`` filter, do not produce different - output (https://github.com/ansible-collections/community.general/issues/3471, - https://github.com/ansible-collections/community.general/pull/3478). - - yum_versionlock - fix idempotency when using wildcard (asterisk) in ``name`` - option (https://github.com/ansible-collections/community.general/issues/2761). - - zfs - certain ZFS properties, especially sizes, would lead to a task being - falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975, - https://github.com/ansible-collections/community.general/pull/2454). - - zfs - treated received properties as local (https://github.com/ansible-collections/community.general/pull/502). - - zypper_repository - fix idempotency on adding repository with ``$releasever`` - and ``$basearch`` variables (https://github.com/ansible-collections/community.general/issues/1985). - - zypper_repository - when a URL to a .repo file was provided in option ``repo=`` - and ``state=present`` only the first run was successful, future runs failed - due to missing checks prior to starting zypper.
Usage of ``state=absent`` in - combination with a .repo file was not working either (https://github.com/ansible-collections/community.general/issues/1791, - https://github.com/ansible-collections/community.general/issues/3466). - deprecated_features: - - ali_instance_info - marked removal version of deprecated parameters ``availability_zone`` - and ``instance_names`` (https://github.com/ansible-collections/community.general/issues/2429). - - bitbucket_* modules - ``username`` options have been deprecated in favor of - ``workspace`` and will be removed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/2045). - - dnsimple - python-dnsimple < 2.0.0 is deprecated and support for it will be - removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2946#discussion_r667624693). - - gitlab_group_members - setting ``gitlab_group`` to ``name`` or ``path`` is - deprecated. Use ``full_path`` instead (https://github.com/ansible-collections/community.general/pull/3451). - - keycloak_authentication - the return value ``flow`` is now deprecated and - will be removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280). - - keycloak_group - the return value ``group`` is now deprecated and will be - removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280). - - linode - parameter ``backupsenabled`` is deprecated and will be removed in - community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2410). - - lxd_container - the current default value ``true`` of ``ignore_volatile_options`` - is deprecated and will change to ``false`` in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/3429). - - serverless - deprecating parameter ``functions`` because it was not used in - the code (https://github.com/ansible-collections/community.general/pull/2845). - - xfconf - deprecate the ``get`` state. The new module ``xfconf_info`` should - be used instead (https://github.com/ansible-collections/community.general/pull/3049). - major_changes: - - 'bitbucket_* modules - ``client_id`` is no longer marked as ``no_log=true``. - If you relied on its value not showing up in logs and output, please mark - the whole tasks with ``no_log: true`` (https://github.com/ansible-collections/community.general/pull/2045).' - minor_changes: - - Avoid internal ansible-core module_utils in favor of equivalent public API - available since at least Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/2877). - - ModuleHelper module utils - improved mechanism for customizing the calculation - of ``changed`` (https://github.com/ansible-collections/community.general/pull/2514). - - Remove unnecessary ``__init__.py`` files from ``plugins/`` (https://github.com/ansible-collections/community.general/pull/2632). - - apache2_module - minor refactoring improving code quality, readability and - speed (https://github.com/ansible-collections/community.general/pull/3106). - - archive - added ``dest_state`` return value to describe final state of ``dest`` - after successful task execution (https://github.com/ansible-collections/community.general/pull/2913). - - archive - added ``exclusion_patterns`` option to exclude files or subdirectories - from archives (https://github.com/ansible-collections/community.general/pull/2616). 
- - archive - refactoring prior to fix for idempotency checks. The fix will be - a breaking change and only appear in community.general 4.0.0 (https://github.com/ansible-collections/community.general/pull/2987). - - bitbucket_* modules - add ``user`` and ``password`` options for Basic authentication - (https://github.com/ansible-collections/community.general/pull/2045). - - chroot connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - cloud_init_data_facts - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - cmd (Module Helper) module utils - ``CmdMixin`` now pulls the value for ``run_command()`` - params from ``self.vars``, as opposed to previously retrieving those from - ``self.module.params`` (https://github.com/ansible-collections/community.general/pull/2517). - - composer - add ``composer_executable`` option (https://github.com/ansible-collections/community.general/issues/2649). - - datadog_event - adding parameter ``api_host`` to allow selecting a datadog - API endpoint instead of using the default one (https://github.com/ansible-collections/community.general/issues/2774, - https://github.com/ansible-collections/community.general/pull/2775). - - datadog_monitor - allow creation of composite datadog monitors (https://github.com/ansible-collections/community.general/issues/2956). - - dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247). - - dnsimple - module rewrite to include support for python-dnsimple>=2.0.0; also - add ``sandbox`` parameter (https://github.com/ansible-collections/community.general/pull/2946). - - elastic callback plugin - enriched the stacktrace information with the ``message``, - ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3556). - - filesystem - cleanup and revamp module, tests and doc. Pass all commands to - ``module.run_command()`` as lists. Move the device-vs-mountpoint logic to - ``grow()`` method. Give to all ``get_fs_size()`` the same logic and error - handling. (https://github.com/ansible-collections/community.general/pull/2472). - - filesystem - extend support for FreeBSD. Avoid potential data loss by checking - existence of a filesystem with ``fstyp`` (native command) if ``blkid`` (foreign - command) doesn't find one. Add support for character devices and ``ufs`` filesystem - type (https://github.com/ansible-collections/community.general/pull/2902). - - flatpak - add ``no_dependencies`` parameter (https://github.com/ansible/ansible/pull/55452, - https://github.com/ansible-collections/community.general/pull/2751). - - flatpak - allows installing or uninstalling a list of packages (https://github.com/ansible-collections/community.general/pull/2521). - - funcd connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - gem - add ``bindir`` option to specify an installation path for executables - such as ``/home/user/bin`` or ``/home/user/.local/bin`` (https://github.com/ansible-collections/community.general/pull/2837). - - gem - add ``norc`` option to avoid loading any ``.gemrc`` file (https://github.com/ansible-collections/community.general/pull/2837). - - github_repo - add new option ``api_url`` to allow working with on premises - installations (https://github.com/ansible-collections/community.general/pull/3038). 
- - gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``, - ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248). - - gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367). - - gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047). - - gitlab_group_members - added functionality to set all members exactly as given - (https://github.com/ansible-collections/community.general/pull/3047). - - gitlab_project - add new options ``allow_merge_on_skipped_pipeline``, ``only_allow_merge_if_all_discussions_are_resolved``, - ``only_allow_merge_if_pipeline_succeeds``, ``packages_enabled``, ``remove_source_branch_after_merge``, - ``squash_option`` (https://github.com/ansible-collections/community.general/pull/3002). - - gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled`` - (https://github.com/ansible-collections/community.general/pull/3379). - - gitlab_project - projects can be created under other users' namespaces with - the new ``username`` option (https://github.com/ansible-collections/community.general/pull/2824). - - gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319). - - gitlab_project_members - added functionality to set all members exactly as - given (https://github.com/ansible-collections/community.general/pull/3319). - - gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634). - - gitlab_user - add ``expires_at`` option (https://github.com/ansible-collections/community.general/issues/2325). - - gitlab_user - add functionality for adding external identity providers to - a GitLab user (https://github.com/ansible-collections/community.general/pull/2691). - - gitlab_user - allow to reset an existing password with the new ``reset_password`` - option (https://github.com/ansible-collections/community.general/pull/2691). - - gitlab_user - specifying a password is no longer necessary (https://github.com/ansible-collections/community.general/pull/2691). - - gunicorn - search for ``gunicorn`` binary in more paths (https://github.com/ansible-collections/community.general/pull/3092). - - hana_query - added the ability to use hdbuserstore (https://github.com/ansible-collections/community.general/pull/3125). - - hpilo_info - added ``host_power_status`` return value to report power state - of machine with ``OFF``, ``ON`` or ``UNKNOWN`` (https://github.com/ansible-collections/community.general/pull/3079). - - idrac_redfish_config - modified set_manager_attributes function to skip invalid - attribute instead of returning. Added skipped attributes to output. Modified - module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). - - influxdb_retention_policy - add ``state`` parameter with allowed values ``present`` - and ``absent`` to support deletion of existing retention policies (https://github.com/ansible-collections/community.general/issues/2383). - - influxdb_retention_policy - simplify duration logic parsing (https://github.com/ansible-collections/community.general/pull/2385).
- - ini_file - add ability to define multiple options with the same name but - different values (https://github.com/ansible-collections/community.general/issues/273, - https://github.com/ansible-collections/community.general/issues/1204). - - ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove - single ``option=value`` entries without overwriting existing options with - the same name but different values (https://github.com/ansible-collections/community.general/pull/3033). - - ini_file - opening file with encoding ``utf-8-sig`` (https://github.com/ansible-collections/community.general/issues/2189). - - interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328). - - iocage connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user - map order (https://github.com/ansible-collections/community.general/pull/3178). - - ipa_group - add ``append`` option for adding group and users members, instead - of replacing the respective lists (https://github.com/ansible-collections/community.general/pull/3545). - - jail connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - java_keystore - added ``ssl_backend`` parameter for using the cryptography - library instead of the OpenSSL binary (https://github.com/ansible-collections/community.general/pull/2485). - - java_keystore - replace envvar by stdin to pass secret to ``keytool`` (https://github.com/ansible-collections/community.general/pull/2526). - - jenkins_build - support stopping a running jenkins build (https://github.com/ansible-collections/community.general/pull/2850). - - jenkins_job_info - the ``password`` and ``token`` parameters can also be omitted - to retrieve only public information (https://github.com/ansible-collections/community.general/pull/2948). - - jenkins_plugin - add fallback url(s) for failure of plugin installation/download - (https://github.com/ansible-collections/community.general/pull/1334). - - jira - add comment visibility parameter for comment operation (https://github.com/ansible-collections/community.general/pull/2556). - - kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329). - - keycloak_* modules - refactor many of the ``keycloak_*`` modules to have similar - structures, comments, and documentation (https://github.com/ansible-collections/community.general/pull/3280). - - keycloak_authentication - enhanced diff mode to also return before and after - state when the authentication flow is updated (https://github.com/ansible-collections/community.general/pull/2963). - - keycloak_client - add ``authentication_flow_binding_overrides`` option (https://github.com/ansible-collections/community.general/pull/2949). - - keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation - of login events (https://github.com/ansible-collections/community.general/pull/3231). - - linode - added proper traceback when failing due to exceptions (https://github.com/ansible-collections/community.general/pull/2410). - - linode - parameter ``additional_disks`` is now validated as a list of dictionaries - (https://github.com/ansible-collections/community.general/pull/2410). - - linode inventory plugin - adds the ``ip_style`` configuration key.
Set to - ``api`` to get more detailed network information back from the remote Linode host - (https://github.com/ansible-collections/community.general/pull/3203). - - lxc connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - lxd_container - add ``ignore_volatile_options`` option which allows disabling - the behavior where the module ignores options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331). - - mail - added the ``ehlohost`` parameter which allows for manual override of - the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425). - - maven_artifact - added ``checksum_alg`` option to support SHA1 checksums in - order to work on FIPS systems (https://github.com/ansible-collections/community.general/pull/2662). - - module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``, - to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290). - - module_helper module utils - added feature flag parameter to ``CmdMixin`` - to control whether ``cmd_args`` is automatically added to the module output - (https://github.com/ansible-collections/community.general/pull/3648). - - module_helper module utils - added feature flag parameters to ``CmdMixin`` - to control whether ``rc``, ``out`` and ``err`` are automatically added to - the module output (https://github.com/ansible-collections/community.general/pull/2922). - - module_helper module utils - broke the long file down into smaller pieces - (https://github.com/ansible-collections/community.general/pull/2393). - - module_helper module utils - method ``CmdMixin.run_command()`` now accepts - ``process_output`` specifying a function to process the outcome of the underlying - ``module.run_command()`` (https://github.com/ansible-collections/community.general/pull/2564). - - module_helper module_utils - added classmethod to trigger the execution of - MH modules (https://github.com/ansible-collections/community.general/pull/3206). - - nmcli - add ``disabled`` value to ``method6`` option (https://github.com/ansible-collections/community.general/issues/2730). - - nmcli - add ``dummy`` interface support (https://github.com/ansible-collections/community.general/issues/724). - - nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105, - https://github.com/ansible-collections/community.general/pull/3262). - - nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313). - - nmcli - add ``routing_rules4`` and ``may_fail4`` options (https://github.com/ansible-collections/community.general/issues/2730). - - nmcli - add ``runner`` and ``runner_hwaddr_policy`` options (https://github.com/ansible-collections/community.general/issues/2901). - - nmcli - add ``wifi-sec`` option change detection to support managing secure - Wi-Fi connections (https://github.com/ansible-collections/community.general/pull/3136). - - nmcli - add ``wifi`` option to support managing Wi-Fi settings such as ``hidden`` - or ``mode`` (https://github.com/ansible-collections/community.general/pull/3081). - - nmcli - add new options to ignore automatic DNS servers and gateways (https://github.com/ansible-collections/community.general/issues/1087).
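The ``wifi`` option mentioned above accepts Wi-Fi settings such as ``hidden`` or ``mode``; a hedged sketch of a task using it (connection name, interface and SSID are placeholders):

  - name: Manage a hidden Wi-Fi connection
    community.general.nmcli:
      conn_name: office-wifi   # placeholder connection name
      ifname: wlan0            # placeholder interface
      type: wifi
      ssid: OfficeNet          # placeholder SSID
      wifi:
        hidden: true           # settings named in the changelog entry above
        mode: infrastructure
      state: present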
- - nmcli - query ``nmcli`` directly to determine available WiFi options (https://github.com/ansible-collections/community.general/pull/3141). - - nmcli - remove dead code, ``options`` never contains keys from ``param_alias`` - (https://github.com/ansible-collections/community.general/pull/2417). - - nmcli - the option ``routing_rules4`` can now be specified as a list of strings, - instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401). - - nrdp callback plugin - parameters are now converted to strings, except ``validate_certs`` - which is converted to boolean (https://github.com/ansible-collections/community.general/pull/2878). - - onepassword lookup plugin - add ``domain`` option (https://github.com/ansible-collections/community.general/issues/2734). - - open-iscsi - adding support for mutual authentication between target and initiator - (https://github.com/ansible-collections/community.general/pull/3422). - - open_iscsi - add ``auto_portal_startup`` parameter to allow ``node.startup`` - setting per portal (https://github.com/ansible-collections/community.general/issues/2685). - - open_iscsi - also consider ``portal`` and ``port`` to check if already logged - in or not (https://github.com/ansible-collections/community.general/issues/2683). - - open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286). - - opentelemetry callback plugin - added option ``enable_from_environment`` to - support enabling the plugin only if the given environment variable exists - and it is set to true (https://github.com/ansible-collections/community.general/pull/3498). - - opentelemetry callback plugin - enriched the span attributes with HTTP metadata - for those Ansible tasks that interact with third party systems (https://github.com/ansible-collections/community.general/pull/3448). - - opentelemetry callback plugin - enriched the stacktrace information for loops - with the ``message``, ``exception`` and ``stderr`` fields from the failed - item in the tasks in addition to the name of the task and failed item (https://github.com/ansible-collections/community.general/pull/3599). - - opentelemetry callback plugin - enriched the stacktrace information with the - ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496). - - opentelemetry callback plugin - transformed args into a list of span attributes; - in addition, it redacted username and password from any URLs (https://github.com/ansible-collections/community.general/pull/3564). - - openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284). - - opkg - allow ``name`` to be a YAML list of strings (https://github.com/ansible-collections/community.general/issues/572, - https://github.com/ansible-collections/community.general/pull/3554). - - pacman - add ``executable`` option to use an alternative pacman binary (https://github.com/ansible-collections/community.general/issues/2524). - - pacman - speed up checking if the package is installed, when the latest version - check is not needed (https://github.com/ansible-collections/community.general/pull/3606). - - pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285). - - passwordstore lookup - add option ``missing`` to choose what to do if the - password file is missing (https://github.com/ansible-collections/community.general/pull/2500).
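A small sketch of the passwordstore lookup's new ``missing`` option described above (the pass path is a placeholder, and ``create`` is assumed to be one of the accepted values):

  - name: Fetch a password, creating it when the password file is missing
    ansible.builtin.debug:
      msg: "{{ lookup('community.general.passwordstore', 'web/example.com/api', missing='create') }}"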
- - pids - refactor to add support for older ``psutil`` versions to the ``pattern`` - option (https://github.com/ansible-collections/community.general/pull/3315). - - pipx - minor refactor on the ``changed`` logic (https://github.com/ansible-collections/community.general/pull/3647). - - pkgin - in case of ``pkgin`` tool failure, display returned standard output - ``stdout`` and standard error ``stderr`` to ease debugging (https://github.com/ansible-collections/community.general/issues/3146). - - pkgng - ``annotation`` can now also be a YAML list (https://github.com/ansible-collections/community.general/pull/3526). - - pkgng - packages being installed (or upgraded) are acted on in one command - (per action) (https://github.com/ansible-collections/community.general/issues/2265). - - pkgng - status message specifies number of packages installed and/or upgraded - separately. Previously, all changes were reported as one count of packages - "added" (https://github.com/ansible-collections/community.general/pull/3393). - - proxmox inventory plugin - added snapshots to host facts (https://github.com/ansible-collections/community.general/pull/3044). - - proxmox_group_info - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - proxmox_kvm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - qubes connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - rax_mon_notification_plan - fixed validation checks by specifying type ``str`` - as the ``elements`` of parameters ``ok_state``, ``warning_state`` and ``critical_state`` - (https://github.com/ansible-collections/community.general/pull/2955). - - redfish_command - add ``boot_override_mode`` argument to BootSourceOverride - commands (https://github.com/ansible-collections/community.general/issues/3134). - - redfish_command and redfish_config and redfish_utils module utils - add parameter - to strip etag of quotes before patch, since some vendors do not properly handle ``If-Match`` - etags with quotes (https://github.com/ansible-collections/community.general/pull/3296). - - redfish_config - modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). - - redfish_info - include ``Status`` property for Thermal objects when querying - Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232). - - redfish_utils module utils - modified set_bios_attributes function to skip - invalid attributes instead of returning. Added skipped attributes to output - (https://github.com/ansible-collections/community.general/issues/1995). - - redhat_subscription - add ``server_prefix`` and ``server_port`` parameters - (https://github.com/ansible-collections/community.general/pull/2779). - - redis - allow using the term ``replica`` instead of ``slave``, which has - been the official Redis terminology since 2018 (https://github.com/ansible-collections/community.general/pull/2867). - - rhevm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - saltstack connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - scaleway plugin inventory - parse scw-cli config file for ``oauth_token`` - (https://github.com/ansible-collections/community.general/pull/3250).
- - serverless - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205). - - snap - added ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1990). - - snap - improved module error handling, especially for the case when the snap server - is down (https://github.com/ansible-collections/community.general/issues/2970). - - splunk callback plugin - add ``batch`` option for user-configurable correlation - IDs (https://github.com/ansible-collections/community.general/issues/2790). - - spotinst_aws_elastigroup - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2355). - - ssh_config - new feature to set ``ForwardAgent`` option to ``yes`` or ``no`` - (https://github.com/ansible-collections/community.general/issues/2473). - - stacki_host - minor refactoring (https://github.com/ansible-collections/community.general/pull/2681). - - supervisorctl - add the possibility to restart all programs and program groups - (https://github.com/ansible-collections/community.general/issues/3551). - - supervisorctl - use the standard Ansible mechanism to validate the parameter - required by the ``signalled`` state (https://github.com/ansible-collections/community.general/pull/3068). - - terraform - add ``check_destroy`` optional parameter to check for deletion - of resources before the plan is applied (https://github.com/ansible-collections/community.general/pull/2874). - - terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540). - - terraform - add option ``overwrite_init`` to skip init if it already exists (https://github.com/ansible-collections/community.general/pull/2573). - - terraform - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). - - timezone - print error message to debug instead of warning when timedatectl - fails (https://github.com/ansible-collections/community.general/issues/1942). - - tss lookup plugin - added ``token`` parameter for token authorization; ``username`` - and ``password`` are optional when ``token`` is provided (https://github.com/ansible-collections/community.general/pull/3327). - - tss lookup plugin - added new parameter for domain authorization (https://github.com/ansible-collections/community.general/pull/3228). - - tss lookup plugin - refactored to decouple the supporting third-party library - (``python-tss-sdk``) (https://github.com/ansible-collections/community.general/pull/3252). - - ufw - if ``delete=true`` and ``insert`` option is present, then ``insert`` - is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514). - - vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191). - - zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502). - - zfs_delegate_admin - drop choices from permissions, allowing any permission - supported by the underlying zfs commands (https://github.com/ansible-collections/community.general/pull/2540). - - zone connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). - - zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332).
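For the ``ssh_config`` ``ForwardAgent`` entry above, a hedged sketch (host and user are placeholders, and ``forward_agent`` is assumed to be the module parameter behind the new feature):

  - name: Enable agent forwarding for the bastion host entry
    community.general.ssh_config:
      user: deploy                  # placeholder user whose ssh config is managed
      host: bastion.example.com     # placeholder host pattern
      forward_agent: true           # assumed parameter name for ForwardAgent
      state: present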
- - zypper - prefix zypper commands with ``/sbin/transactional-update --continue - --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159). - release_summary: This is release 4.0.0 of ``community.general``, released on - 2021-11-02. - removed_features: - - All inventory and vault scripts contained in community.general were moved - to the `contrib-scripts GitHub repository `_ - (https://github.com/ansible-collections/community.general/pull/2696). - - ModuleHelper module utils - remove fallback when value could not be determined - for a parameter (https://github.com/ansible-collections/community.general/pull/3461). - - Removed deprecated netapp module utils and doc fragments (https://github.com/ansible-collections/community.general/pull/3197). - - The nios, nios_next_ip, nios_next_network lookup plugins, the nios documentation - fragment, and the nios_host_record, nios_ptr_record, nios_mx_record, nios_fixed_address, - nios_zone, nios_member, nios_a_record, nios_aaaa_record, nios_network, nios_dns_view, - nios_txt_record, nios_naptr_record, nios_srv_record, nios_cname_record, nios_nsgroup, - and nios_network_view modules have been removed from community.general 4.0.0 - and were replaced by redirects to the `infoblox.nios_modules `_ - collection. Please install the ``infoblox.nios_modules`` collection to continue - using these plugins and modules, and update your FQCNs (https://github.com/ansible-collections/community.general/pull/3592). - - The vendored copy of ``ipaddress`` has been removed. Please use ``ipaddress`` - from the Python 3 standard library, or `from pypi `_. - (https://github.com/ansible-collections/community.general/pull/2441). - - cpanm - removed the deprecated ``system_lib`` option. Use Ansible's privilege - escalation mechanism instead; the option basically used ``sudo`` (https://github.com/ansible-collections/community.general/pull/3461). - - grove - removed the deprecated alias ``message`` of the ``message_content`` - option (https://github.com/ansible-collections/community.general/pull/3461). - - proxmox - default value of ``proxmox_default_behavior`` changed to ``no_defaults`` - (https://github.com/ansible-collections/community.general/pull/3461). - - proxmox_kvm - default value of ``proxmox_default_behavior`` changed to ``no_defaults`` - (https://github.com/ansible-collections/community.general/pull/3461). - - runit - removed the deprecated ``dist`` option which was not used by the module - (https://github.com/ansible-collections/community.general/pull/3461). - - telegram - removed the deprecated ``msg``, ``msg_format`` and ``chat_id`` - options (https://github.com/ansible-collections/community.general/pull/3461). - - xfconf - the default value of ``disable_facts`` changed to ``true``, and the - value ``false`` is no longer allowed. Register the module results instead - (https://github.com/ansible-collections/community.general/pull/3461). - security_fixes: - - nmcli - do not pass WiFi secrets on the ``nmcli`` command line. Use ``nmcli - con edit`` instead and pass secrets as ``stdin`` (https://github.com/ansible-collections/community.general/issues/3145).
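The usage sketch referenced in the ``ini_file`` ``exclusive`` entry earlier in this release's change list; per that entry, ``exclusive: false`` adds the entry without overwriting other values of the same option (path and values are placeholders):

  - name: Add one value for an option that may legitimately appear multiple times
    community.general.ini_file:
      path: /etc/app/settings.ini   # placeholder path
      section: listeners
      option: bind
      value: 10.0.0.2:8080
      exclusive: false              # keep other bind= entries in the section intact
      state: present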
- fragments: - - 1085-consul-acl-hcl-whitelist-update.yml - - 1334-jenkins-plugin-fallback-urls.yaml - - 1942_timezone.yml - - 2045-bitbucket_support_basic_auth.yaml - - 2126-consul_kv-pass-token.yml - - 2284-influxdb_retention_policy-fix_duration_parsing.yml - - 2323-groupby_as_dict-filter.yml - - 2334-redfish_config-skip-incorrect-attributes.yml - - 2337-mark-inventory-scripts-executable.yml - - 2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml - - 2355-spotinst_aws_elastigroup-list-elements.yml - - 2364-influxdb_user-first_user.yml - - 2369-lvol_size_bug_fixes.yml - - 2373-svr4pkg-fix-typeerror.yml - - 2383-influxdb_retention_policy-add-state-option.yml - - 2393-module_helper-breakdown.yml - - 2407-puppet-change_stdout_to_console.yaml - - 2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml - - 2410-linode-improvements.yml - - 2411-snap-revamp-enabled-disabled-states.yml - - 2416-nmcli_compare_mac_addresses_case_insensitively.yml - - 2417-nmcli_remove_dead_code.yml - - 2430-linodev4-error-message.yml - - 2435-one_vm-fix_missing_keys.yml - - 2448-stackpath_compute-fix.yml - - 2450-gitlab_user-add_expires_at_option.yaml - - 2454-detect_zfs_changed.yml - - 2461-ovirt4-fix-configparser.yml - - 2472_filesystem_module_revamp.yml - - 2485-java_keystore-ssl_backend-parameter.yml - - 2499-influxdb_user-fix-multiple-no-privileges.yml - - 2500-passwordstore-add_option_ignore_missing.yml - - 2510-jenkins_plugin_use_post_method.yml - - 2514-mh-improved-changed.yml - - 2516_fix_2515_keystore_type_jks.yml - - 2517-cmd-params-from-vars.yml - - 2518-nmap-fix-cache-disabled.yml - - 2520-connection-refactors.yml - - 2521-flatpak-list.yml - - 2524-pacman_add_bin_option.yml - - 2525-iptables_state-fix-initialization-command.yml - - 2526-java_keystore-password-via-stdin.yml - - 2540-zfs-delegate-choices.yml - - 2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml - - 2557-cloud-misc-refactor.yml - - 2560-java_cert-pkcs12-alias-bugfix.yml - - 2564-mh-cmd-process-output.yml - - 2568-ssh_config-reduce-stormssh-searches-based-on-host.yml - - 2571-rhsm_release-fix-release_matcher.yaml - - 2573-terraform-overwrite-init.yml - - 2578-ini-file-utf8-bom.yml - - 2579-redis-cache-ipv6.yml - - 2590-netcup_dns-exception-no-message-attr.yml - - 2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml - - 2616-archive-exclusion_patterns-option.yml - - 2632-cleanup.yml - - 2634-terraform-switch-workspace.yml - - 2635-nmcli-add-ignore-auto-arguments.yml - - 2648-proxmox_kvm-fix-vmid-return-value.yml - - 2650-composer-add_composer_executable.yml - - 2661-maven_artifact-add-sha1-option.yml - - 2671-fix-broken-query-of-async_status-result.yml - - 2681-stacki-host-bugfix.yml - - 2684-open_iscsi-single-target-multiple-portal-overrides.yml - - 2691-gitlab_user-support-identity-provider.yml - - 2692-logstash-callback-plugin-replacing_options.yml - - 2711-fix-iptables_state-2700-async_status-call.yml - - 2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml - - 273-add_multiple_options_with_same_name_to_ini_file.yml - - 2731-mh-cmd-locale.yml - - 2732-nmcli_add_options.yml - - 2735-onepassword-add_domain_option.yml - - 2751-flatpak-no_dependencies.yml - - 2771-scaleway_inventory_json_accept_byte_array.yml - - 2774-datadog_event_api_parameter.yml - - 2779_redhat_subscription-add_server_prefix_and_server_port.yml - - 2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml - - 2790-callback_splunk-batch-option.yml - - 2808-pids-older-psutil.yml - - 
2816-archive-refactor.yml - - 2821-ipa_sudorule.yml - - 2824-gitlab_project-project-under-user.yml - - 2827-nmcli_fix_team_slave.yml - - 2830-npm-version-update.yml - - 2841-proxmox_kvm_zfs_devstr.yml - - 2843-modprobe-failure-conditions.yml - - 2844-ali_instance_info-deprecate-params.yml - - 2845-serverless-deprecate-functions-param.yml - - 2850-jenkins_build-support-stop-jenkins-build.yml - - 2867-redis-terminology.yml - - 2874-terraform-check-destroy.yml - - 2875-ini_file-unicode.yml - - 2878-validate-certs-bool.yml - - 2881-gitlab_project-fix_workspace_user.yaml - - 2883-_mount-fixed-sanity-checks.yml - - 2901-nmcli_teaming.yml - - 2902-filesystem_extend_freebsd_support.yml - - 2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml - - 2912-snap-module-helper.yml - - 2913-archive-dest_state.yml - - 2918-snap-param-order.yml - - 2922-mh-cmd-output-feature-flag.yml - - 2923-archive-remove-bugfix.yml - - 2924-npm-fix-package-json.yml - - 2935-lvol-support_check_mode_thinpool.yml - - 2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml - - 2946-python-dnsimple-v2-rewrite.yml - - 2948-jenkins_job_info-remove_necessities_on_password_or_token.yml - - 2949-add_authentication-flow-binding_keycloak-client.yml - - 2951-mh-vars-deepcopy.yml - - 2955-rax_mon_notification_plan-added-elements-to-list-params.yaml - - 2958-datadog_monitor_support_composites.yml - - 2960-launchd-validation-check.yaml - - 2963-improve-diff-mode-on-keycloak_authentication.yml - - 2967-proxmox_inventory-offline-node-fix.yml - - 2987-archive-stage-idempotency-fix.yml - - 2989-pamd-single-line.yaml - - 3001-enhance_gitlab_module.yml - - 3006-redfish_command-bootoverride-argument-check.yaml - - 3028-snap-channel.yml - - 3034-promox-kvm-return-new-id.yaml - - 3036-archive-root-path-fix.yml - - 3038-enhance_github_repo_api_url.yml - - 3041-fix_gitlab_group_members_gitlab_project_mambers.yml - - 3041-gitlab_x_members_fix_and_enhancement.yml - - 3044-proxmox-inventory-snapshots.yml - - 3049-xfconf-deprecate-get.yaml - - 3052_proxmox_inventory_plugin.yml - - 3067-taiga-bugfix.yaml - - 3068-supervisorctl-bugfix.yaml - - 3074-ini_file-3031-empty-value-inconsistency.yml - - 3075-archive-idempotency-enhancements.yml - - 3079-report-power-state-hpilo.yaml - - 3080-java_cert-2460-import_private_key.yml - - 3081-add-wifi-option-to-nmcli-module.yml - - 3084-info-checkmode.yaml - - 3092-gunicorn-refactor.yaml - - 3093-ejabberd_user-refactor.yaml - - 3098-django_manage-cmd-list.yaml - - 3104-deploy_helper-required_if.yaml - - 3106-apache2_module-review.yaml - - 3125-hana-query-userstore.yaml - - 3132-nmcli-dummy.yaml - - 3135-add-redfish_command-bootoverridemode.yaml - - 3136-add-wifi-sec-change-detection-to-nmcli-module.yml - - 3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml - - 3141-disallow-options-unsupported-by-nmcli.yml - - 3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml - - 3161-openbsd-pkg-fix-regexp-matching-crash.yml - - 3164-zypper-support-transactional-updates.yaml - - 3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml - - 3191-vdo-refactor.yml - - 3194-sanity.yml - - 3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml - - 3203-linode-inventory-return-full-api-ip-data.yml - - 3205-slack-minor-refactor.yaml - - 3206-mh-classmethod.yaml - - 3211-snap-error-handling.yml - - 3228-tss-domain-authorization.yml - - 3231-fix-keycloak-realm-events.yml - - 3233-include-thermal-sensor-status-via-redfish_info.yaml - - 3237-copr-fix_chroot_naming.yml - - 
3239-nmcli-sit-ipip-config-bugfix.yaml - - 3247-retry_servfail-for-dig.yaml - - 3248-adds-few-more-gitlab-group-options.yml - - 3250-parse-scw-config.yml - - 3252-tss_lookup_plugin-refactor.yml - - 3256-fix-ptr-handling-in-udm_dns_record.yml - - 3258-apache2_module.yml - - 3262-nmcli-add-gre-tunnel-support.yaml - - 3266-vmid-existing-target-clone.yml - - 3267-dnsimple1-deprecation.yml - - 3280-keycloak-module-cleanup-and-consistency.yml - - 3283-django_manage-fix-command-splitting.yaml - - 3284-openwrt_init-improvements.yaml - - 3285-pamd-updated-with-empty-args.yaml - - 3286-open_iscsi-improvements.yaml - - 3290-mh-cmd-boolean-not.yaml - - 3296-clean-etag.yaml - - 3313-nmcli-add_gsm_support.yml - - 3315-pids-refactor.yml - - 3319-gitlab_project_members_enhancement.yml - - 3327-tss-token-authorization.yml - - 3328-interfaces_file-improvements.yaml - - 3329-kernel_blacklist-improvements.yaml - - 3330-bugfix-keycloak-authentication-flow-requirements-not-set-correctly.yml.yml - - 3331-do_not_ignore_volatile_configs_by_option.yml - - 3332-zpool_facts-pythonify.yaml - - 3334-django_manage-split-params.yaml - - 3336-openbsd_pkg-fix-KeyError.yml - - 3337-linode-fix.yml - - 3343-redfish_utils-addUser-userId.yml - - 3359-add-unicode_normalize-filter.yml - - 3367-add-require_two_factor_authentication-property-to-gitlab-group.yml - - 3379-gitlab_project-ci_cd_properties.yml - - 3393-pkgng-many_packages_one_command.yml - - 3400-fix-gitLab-api-searches-always-return-first-found-match-3386.yml - - 3401-nmcli-needs-type.yml - - 3404-redfish_utils-skip-manager-network-check.yml - - 3422-open-iscsi-mutual-authentication-support.yaml - - 3425-mail_add_configurable_ehlo_hostname.yml - - 3426-copy-permissions-along-with-file-for-jboss-module.yml - - 3429-enable_deprecaded_message_for_ignore_volatile_option.yml - - 3450-callback_opentelemetry-exception_handling.yml - - 3451-gitlab-group-member-deprecate-name-and-path.yml - - 3453-fix-gitlab_group-require_two_factor_authentication-cant_be_null.yml - - 3461-remove-deprecations-for-4.0.0.yml - - 3473-gitlab_deploy_key-fix_idempotency.yml - - 3474-zypper_repository_improve_repo_file_idempotency.yml - - 3478-yaml-callback.yml - - 3495-ssh_config_add_forwardagent_option.yml - - 3496-callback_opentelemetry-enrich_stacktraces.yml - - 3498-callback_opentelemetry-only_in_ci.yml - - 3500-macports-add-stdout-and-stderr-to-status.yaml - - 3509-redfish_utils-SetOneTimeBoot-mode-fix.yml - - 3514-ufw_insert_or_delete_biased_when_deletion_enabled.yml - - 3526-pkgng-add-integration-tests.yml - - 3536-quote-role-name-in-url.yml - - 3538-fix-keycloak-idp-mappers-change-detection.yml - - 3540-terraform_add_parallelism_parameter.yml - - 3545-ipa_group-add-append-option.yml - - 3551-supervisor-all.yml - - 3554-opkg-name.yml - - 3556-callback_elastic-enrich_stacktraces.yml - - 3558-callback_opentelemetry-enrich_service_map.yml - - 3561-fix-ipa-host-var-detection.yml - - 3563-nmcli-ipv6_dns.yaml - - 3564-callback_opentelemetry-redacted_user_pass_from_url_args.yml - - 3583-fix-pkgin-exception.yml - - 3599-callback_opentelemetry-enriched_errors_in_loops.yml - - 3602-fix-gitlab_project_members-improve-search-method.yml - - 3606-pacman-speed-up-check-if-package-is-installed.yml - - 3610-fix-keycloak-client-diff-bugs-when-sorting.yml - - 3611-pipx-fix-inject.yml - - 3622-fix-gitlab-deploy-key-check-mode.yml - - 3626-fix-one_image-error.yml - - 3634-pipx-improve-changed.yaml - - 3648-mh-cmd-publish-cmd.yaml - - 3649-proxmox_group_info_TypeError.yml - - 3655-use-publish_cmd.yaml - - 
4.0.0.yml - - 502-zfs_bugfix_and_diff_mode_support.yaml - - 634-gitlab_project_runners.yaml - - a_module-test.yml - - ansible-core-_text.yml - - gem_module_add_bindir_option.yml - - ipaddress.yml - - json_query_more_types.yml - - keycloak-realm-no-log-password-reset.yml - - keycloak_realm_ssl_required.yml - - netapp-removal.yml - - nios-removal.yml - - pkgin-output-after-error.yml - - remove-scripts.yml - modules: - - description: Install Ansible roles or collections using ansible-galaxy - name: ansible_galaxy_install - namespace: packaging.language - - description: Send Discord messages - name: discord - namespace: notification - - description: Locks package versions in C(dnf) based systems - name: dnf_versionlock - namespace: packaging.os - - description: (un)Marking existing branches for protection - name: gitlab_protected_branch - namespace: source_control.gitlab - - description: Execute SQL on HANA - name: hana_query - namespace: database.saphana - - description: Configure authentication in Keycloak - name: keycloak_authentication - namespace: identity.keycloak - - description: Allows administration of Keycloak client_rolemapping with the Keycloak - API - name: keycloak_client_rolemapping - namespace: identity.keycloak - - description: Allows administration of Keycloak client_scopes via Keycloak API - name: keycloak_clientscope - namespace: identity.keycloak - - description: Allows administration of Keycloak identity providers via Keycloak - API - name: keycloak_identity_provider - namespace: identity.keycloak - - description: Allows administration of Keycloak roles via Keycloak API - name: keycloak_role - namespace: identity.keycloak - - description: Allows administration of Keycloak user federations via Keycloak - API - name: keycloak_user_federation - namespace: identity.keycloak - - description: Execute SQL scripts on a MSSQL database - name: mssql_script - namespace: database.mssql - - description: Manage pacman's list of trusted keys - name: pacman_key - namespace: packaging.os - - description: Manages applications installed with pipx - name: pipx - namespace: packaging.language - - description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster. 
- name: proxmox_nic - namespace: cloud.misc - - description: Retrieve information about one or more Proxmox VE tasks - name: proxmox_tasks_info - namespace: cloud.misc - - description: Set key value pairs in Redis - name: redis_data - namespace: database.misc - - description: Increment keys in Redis - name: redis_data_incr - namespace: database.misc - - description: Get value of key in Redis database - name: redis_data_info - namespace: database.misc - - description: Query executions for a Rundeck job - name: rundeck_job_executions_info - namespace: web_infrastructure - - description: Run a Rundeck job - name: rundeck_job_run - namespace: web_infrastructure - - description: Perform SAP Task list execution - name: sap_task_list_execute - namespace: system - - description: Manages SAP SAPCAR archives - name: sapcar_extract - namespace: files - - description: Manages snap aliases - name: snap_alias - namespace: packaging.os - - description: Retrieve XFCE4 configurations - name: xfconf_info - namespace: system - plugins: - callback: - - description: Create distributed traces for each Ansible task in Elastic APM - name: elastic - namespace: null - - description: Create distributed traces with OpenTelemetry - name: opentelemetry - namespace: null - filter: - - description: Transform a sequence of dictionaries to a dictionary where the - dictionaries are indexed by an attribute - name: groupby_as_dict - namespace: null - - description: Normalizes unicode strings to facilitate comparison of characters - with normalized forms - name: unicode_normalize - namespace: null - inventory: - - description: Icinga2 inventory source - name: icinga2 - namespace: null - - description: OpenNebula inventory source - name: opennebula - namespace: null - lookup: - - description: Retrieves the version of an installed collection - name: collection_version - namespace: null - - description: Composes a list with nested elements of other lists or dicts - which can depend on previous loop variables - name: dependent - namespace: null - - description: Generates random pet names - name: random_pet - namespace: null - - description: Generates random string - name: random_string - namespace: null - - description: Return a number of random words - name: random_words - namespace: null - test: - - description: Check whether the given string refers to an available module - or action plugin - name: a_module - namespace: null - release_date: '2021-11-02' - 4.0.1: - changes: - bugfixes: - - a_module test plugin - fix crash when testing a module name that was tombstoned - (https://github.com/ansible-collections/community.general/pull/3660). - - xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError`` - due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673). - release_summary: Bugfix release for today's Ansible 5.0.0 beta 1. - fragments: - - 3660-a_module-tombstone.yml - - 3675-xattr-handle-base64-values.yml - - 4.0.1.yml - release_date: '2021-11-09' - 4.0.2: - changes: - bugfixes: - - counter_enabled callback plugin - fix output to correctly display host and - task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709). - - ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619). 
- - lvol - allows logical volumes to be created with certain size arguments prefixed - with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665). - - nmcli - fixed falsely reported changed status when ``mtu`` is omitted with - ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612, - https://github.com/ansible-collections/community.general/pull/3625). - deprecated_features: - Support for Ansible 2.9 and ansible-base 2.10 is deprecated, and will be removed - in the next major release (community.general 5.0.0) next spring. While most - content will probably still work with ansible-base 2.10, we will remove symbolic - links for modules and action plugins, which will make it impossible to use - them with Ansible 2.9 anymore. Please use community.general 4.x.y with Ansible - 2.9 and ansible-base 2.10, as these releases will continue to support Ansible - 2.9 and ansible-base 2.10 even after they are End of Life (https://github.com/ansible-community/community-topics/issues/50, - https://github.com/ansible-collections/community.general/pull/3723). - release_summary: Bugfix release for today's Ansible 5.0.0 beta 2. - fragments: - - 3625-nmcli_false_changed_mtu_fix.yml - - 3667-ldap_search.yml - - 3681-lvol-fix-create.yml - - 3709-support-batch-mode.yml - - 4.0.2.yml - - deprecate-ansible-2.9-2.10.yml - release_date: '2021-11-16' - 4.1.0: - changes: - bugfixes: - - github_repo - ``private`` and ``description`` attributes should not be set - to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386). - - terraform - fix command options such as ``lock`` or ``lock_timeout`` being ignored - by function ``build_plan`` during planning (https://github.com/ansible-collections/community.general/issues/3707, - https://github.com/ansible-collections/community.general/pull/3726). - minor_changes: - - gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694). - - ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698). - - ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698). - - listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708). - - lxd_container - adds ``type`` option which also allows operating on virtual - machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661). - - nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088, - https://github.com/ansible-collections/community.general/pull/3738). - - open_iscsi - extended module to allow rescanning of established sessions for - one or all targets (https://github.com/ansible-collections/community.general/issues/3763). - - pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758). - - redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish - Host Interface information (https://github.com/ansible-collections/community.general/issues/3693).
- - redfish_command - add ``SetHostInterface`` command to enable configuring the - Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632). - release_summary: Regular bugfix and feature release. - fragments: - - 1088-nmcli_add_multiple_addresses_support.yml - - 2386-github_repo-fix-idempotency-issues.yml - - 3632-add-redfish-host-interface-config-support.yml - - 3661-lxd_container-add-vm-support.yml - - 3693-add-redfish-host-interface-info-support.yml - - 3694-gitlab-cleanup.yml - - 3702-ipmi-encryption-key.yml - - 3708-listen_ports_facts-add-ss-support.yml - - 3726-terraform-missing-parameters-planned-fix.yml - - 3758-pacman-add-stdout-stderr.yml - - 3765-extend-open_iscsi-with-rescan.yml - - 4.1.0.yml - plugins: - inventory: - - description: Xen Orchestra inventory source - name: xen_orchestra - namespace: null - lookup: - - description: Get secrets from RevBits PAM server - name: revbitspss - namespace: null - release_date: '2021-11-23' - 4.2.0: - changes: - bugfixes: - - icinga2 inventory plugin - handle 404 error when filter produces no results - (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). - - interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841). - - jira - fixed bug where module returns error related to dictionary key ``body`` - (https://github.com/ansible-collections/community.general/issues/3419). - - nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses - on task rerun (https://github.com/ansible-collections/community.general/issues/3768). - - nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086). - - nrdp callback plugin - fix error ``string arguments without an encoding`` - (https://github.com/ansible-collections/community.general/issues/3903). - - opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead - of reporting an error (https://github.com/ansible-collections/community.general/pull/3837). - - pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791). - - proxmox - fixed ``onboot`` parameter causing module failures when undefined - (https://github.com/ansible-collections/community.general/issues/3844). - - python_requirements_info - fails if version operator used without version - (https://github.com/ansible-collections/community.general/pull/3785). - deprecated_features: - - module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict`` - (https://github.com/ansible-collections/community.general/pull/3801). - minor_changes: - - aix_filesystem - calling ``run_command`` with arguments as ``list`` instead - of ``str`` (https://github.com/ansible-collections/community.general/pull/3833). - - aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3834). - - gitlab - add more token authentication support with the new options ``api_oauth_token`` - and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705). - - gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792). 
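For the new GitLab token options above, a hedged sketch using ``api_oauth_token`` (the URL and token variable are placeholders):

  - name: Ensure a project exists, authenticating with an OAuth token
    community.general.gitlab_project:
      api_url: https://gitlab.example.com           # placeholder GitLab instance
      api_oauth_token: "{{ gitlab_oauth_token }}"   # placeholder vaulted variable
      name: demo-project
      state: present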
- - gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme - = true``) (https://github.com/ansible-collections/community.general/pull/3792). - - hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840). - - icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875, - https://github.com/ansible-collections/community.general/pull/3906). - - icinga2 inventory plugin - inventory object names can be changed using ``inventory_attr`` - in your config file, selecting the host object's name, address, or display_name field - (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). - - ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3822). - - iso_extract - calling ``run_command`` with arguments as ``list`` instead of - ``str`` (https://github.com/ansible-collections/community.general/pull/3805). - - java_cert - calling ``run_command`` with arguments as ``list`` instead of - ``str`` (https://github.com/ansible-collections/community.general/pull/3835). - - jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838). - - keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767). - - logentries - calling ``run_command`` with arguments as ``list`` instead of - ``str`` (https://github.com/ansible-collections/community.general/pull/3807). - - logstash_plugin - calling ``run_command`` with arguments as ``list`` instead - of ``str`` (https://github.com/ansible-collections/community.general/pull/3808). - - lxc_container - calling ``run_command`` with arguments as ``list`` instead - of ``str`` (https://github.com/ansible-collections/community.general/pull/3851). - - lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``, - and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798). - - lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519). - - module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns`` - for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849). - - monit - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3821). - - nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088). - - nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357). - - python_requirements_info - returns python version broken down into its components, - and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797). - - svc - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3829). - - xattr - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3806).
- - xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919). - release_summary: Regular bugfix and feature release. - fragments: - - 1088-add_multiple_ipv6_address_support.yml - - 3357-nmcli-eui64-and-ipv6privacy.yml - - 3519-inventory-support-lxd-4.yml - - 3768-nmcli_fix_changed_when_no_mask_set.yml - - 3780-add-keycloak-sssd-user-federation.yml - - 3785-python_requirements_info-versionless-op.yaml - - 3792-improve_gitlab_group_and_project.yml - - 3797-python_requirements_info-improvements.yaml - - 3798-fix-lxd-connection-option-vars-support.yml - - 3800-pipx-include-apps.yaml - - 3801-mh-deprecate-vardict-attr.yaml - - 3805-iso_extract-run_command-list.yaml - - 3806-xattr-run_command-list.yaml - - 3807-logentries-run_command-list.yaml - - 3808-logstash_plugin-run_command-list.yaml - - 3821-monit-run-list.yaml - - 3822-ip_netns-run-list.yaml - - 3829-svc-run-list.yaml - - 3833-aix_filesystem-run-list.yaml - - 3834-aix-lvg-run-list.yaml - - 3835-java-cert-run-list.yaml - - 3837-opentelemetry_plugin-honour_ignore_errors.yaml - - 3838-jira-token.yaml - - 3840-hponcfg-mh-revamp.yaml - - 3849-mh-check-mode-decos.yaml - - 3851-lxc-container-run-list.yaml - - 3862-interfaces-file-fix-dup-option.yaml - - 3867-jira-fix-body.yaml - - 3874-proxmox-fix-onboot-param.yml - - 3875-icinga2-inv-fix.yml - - 3896-nmcli_vlan_missing_options.yaml - - 3909-nrdp_fix_string_args_without_encoding.yaml - - 3919-xfconf-baseclass.yaml - - 4.2.0.yml - - 705-gitlab-auth-support.yml - modules: - - description: Pull basic info from DNSimple API - name: dnsimple_info - namespace: net_tools - - description: Create or delete a branch - name: gitlab_branch - namespace: source_control.gitlab - - description: Sets or updates configuration attributes on HPE iLO with Redfish - OEM extensions - name: ilo_redfish_config - namespace: remote_management.redfish - - description: Gathers server information through iLO using Redfish APIs - name: ilo_redfish_info - namespace: remote_management.redfish - release_date: '2021-12-21' - 4.3.0: - changes: - bugfixes: - - Various modules and plugins - use vendored version of ``distutils.version`` - instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936). - - alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976). - - jail connection plugin - replace deprecated ``distutils.spawn.find_executable`` - with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934). - - lxd connection plugin - replace deprecated ``distutils.spawn.find_executable`` - with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934). - - passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool`` - with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``, - ``returnall``, ``overwrite``, ``backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934). - - say callback plugin - replace deprecated ``distutils.spawn.find_executable`` - with Ansible's ``get_bin_path`` to find the ``say`` or ``espeak`` executable, respectively - (https://github.com/ansible-collections/community.general/pull/3934).
- - scaleway_user_data - fix a double-quote being added to user data on Scaleway servers - where none is needed (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940). - - slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932). - - zone connection plugin - replace deprecated ``distutils.spawn.find_executable`` - with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934). - minor_changes: - - ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string - parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent - with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374). - - ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374). - - ipmi_power - add ``machine`` option to ensure the power state via the remote - target address (https://github.com/ansible-collections/community.general/pull/3968). - - mattermost - add the possibility to send attachments instead of text messages - (https://github.com/ansible-collections/community.general/pull/3946). - - nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985). - - proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930). - - puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff`` - is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980). - - scaleway_compute - add possibility to use project identifier (new ``project`` - option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951). - - scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964). - release_summary: Regular feature and bugfix release. - fragments: - - 3374-add-ipa-ptr-sync-support.yml - - 3921-add-counter-filter-plugin.yml - - 3930-proxmox-add-clone.yaml - - 3933-slack-charset-header.yaml - - 3934-distutils.yml - - 3936-distutils.version.yml - - 3940_fix_contenttype_scaleway_user_data.yml - - 3946-mattermost_attachments.yml - - 3951-scaleway_compute_add_project_id.yml - - 3964-scaleway_volume_add_region.yml - - 3968-ipmi_power-add-machine-option.yaml - - 3976-fix-alternatives-parsing.yml - - 3980-puppet-show_diff.yml - - 3985-nmcli-add-wireguard-connection-type.yml - - 4.3.0.yml - modules: - - description: Manage Rust packages with cargo - name: cargo - namespace: packaging.language - - description: Allows obtaining Keycloak realm public information via Keycloak - API - name: keycloak_realm_info - namespace: identity.keycloak - - description: Manage sudoers files - name: sudoers - namespace: system - plugins: - filter: - - description: Counts hashable elements in a sequence - name: counter - namespace: null - release_date: '2022-01-11' - 4.4.0: - changes: - bugfixes: - - cargo - fix detection of outdated packages when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/4052).
- - cargo - fix incorrectly reported changed status for packages with a name containing - a hyphen (https://github.com/ansible-collections/community.general/issues/4044, - https://github.com/ansible-collections/community.general/pull/4052). - - gitlab_project_variable - add missing documentation about GitLab versions - that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/issues/4038). - - 'gitlab_project_variable - allow setting the same variable name under different - environment scopes. Due to this change, the return value ``project_variable`` - differs from the previous version in check mode. It was counting ``updated`` values, - because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/issues/4038). - - ' - - gitlab_project_variable - fix idempotent change behaviour for float and integer - variables (https://github.com/ansible-collections/community.general/issues/4038). - - gitlab_runner - use correct API endpoint to create and retrieve project level - runners when using ``project`` (https://github.com/ansible-collections/community.general/pull/3965). - - listen_ports_facts - the local port regex was not handling IPv6-only bindings well. - Fixes the regex for ``ss`` (https://github.com/ansible-collections/community.general/pull/4092). - - mail callback plugin - fix crash on Python 3 (https://github.com/ansible-collections/community.general/issues/4025, - https://github.com/ansible-collections/community.general/pull/4026). - - 'opentelemetry - fix generating a trace with a task containing ``no_log: true`` - (https://github.com/ansible-collections/community.general/pull/4043).' - - python_requirements_info - store ``mismatched`` return values per package - as documented in the module (https://github.com/ansible-collections/community.general/pull/4078). - - yarn - fix incorrect handling of ``yarn list`` and ``yarn global list`` output - that could result in a fatal error (https://github.com/ansible-collections/community.general/pull/4050). - - yarn - fix incorrectly reported status when installing a package globally - (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4050). - - yarn - fix missing ``~`` expansion in yarn global install folder which resulted - in incorrect task status (https://github.com/ansible-collections/community.general/issues/4045, - https://github.com/ansible-collections/community.general/pull/4048). - deprecated_features: - - mail callback plugin - not specifying ``sender`` is deprecated and will be - disallowed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4140). - minor_changes: - - cobbler inventory plugin - add ``include_profiles`` option (https://github.com/ansible-collections/community.general/pull/4068). - - gitlab_project_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/issues/4038). - - icinga2 inventory plugin - implemented constructed interface (https://github.com/ansible-collections/community.general/pull/4088). - - linode inventory plugin - allow templating of ``access_token`` variable in - Linode inventory plugin (https://github.com/ansible-collections/community.general/pull/4040). - - lists_mergeby filter plugin - add parameters ``list_merge`` and ``recursive``.
- These are only supported when used with ansible-base 2.10 or ansible-core, - but not with Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/4058). - - lxc_container - added ``wait_for_container`` parameter. If ``true``, the module - will wait until the running task reports success as the status (https://github.com/ansible-collections/community.general/pull/4039). - - mail callback plugin - add ``Message-ID`` and ``Date`` headers (https://github.com/ansible-collections/community.general/issues/4055, - https://github.com/ansible-collections/community.general/pull/4056). - - mail callback plugin - properly use Ansible's option handling to split lists - (https://github.com/ansible-collections/community.general/pull/4140). - - nmcli - adds ``routes6`` and ``route_metric6`` parameters for supporting IPv6 - routes (https://github.com/ansible-collections/community.general/issues/4059). - - opennebula - add the release action for VMs in the ``HOLD`` state (https://github.com/ansible-collections/community.general/pull/4036). - - opentelemetry_plugin - enrich service when using the ``docker_login`` module (https://github.com/ansible-collections/community.general/pull/4104). - - proxmox modules - move ``HAS_PROXMOXER`` check into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4030). - - scaleway inventory plugin - add profile parameter ``scw_profile`` (https://github.com/ansible-collections/community.general/pull/4049). - - snap - add option ``options`` permitting options to be set using the ``snap set`` - command (https://github.com/ansible-collections/community.general/pull/3943). - release_summary: Regular features and bugfixes release. - fragments: - - 3935-use-gitlab-instance-runner-to-create-runner.yml - - 3943-add-option-options-to-snap-module.yml - - 4.4.0.yml - - 4026-fix-mail-callback.yml - - 4030-proxmox-has-proxmoxer.yml - - 4036-onevm-add-release-action.yaml - - 4038-fix-and-rework-gitlb-project-variable.yml - - 4039-cluster-container-wait.yml - - 4040-linode-token-templating.yaml - - 4043-fix-no-log-opentelemetry.yml - - 4048-expand-tilde-in-yarn-global-install-folder.yaml - - 4049-profile-for-scaleway-inventory.yml - - 4050-properly-parse-json-lines-output-from-yarn.yaml - - 4052-fix-detection-of-installed-cargo-packages-with-hyphens.yaml - - 4056-add-missing-mail-headers.yml - - 4058-lists_mergeby-add-parameters.yml - - 4062-nmcli-ipv6-routes-support.yml - - 4068-add-include_file-option.yml - - 4078-python_requirements_info.yaml - - 4088-add-constructed-interface-for-icinga2-inventory.yml - - 4092-fix_local_ports_regex_listen_ports_facts.yaml - - 4104-opentelemetry_plugin-enrich_docker_login.yaml - - 4140-mail-callback-options.yml - modules: - - description: Manage user accounts with systemd-homed - name: homectl - namespace: system - release_date: '2022-02-01' - 4.5.0: - changes: - bugfixes: - - dconf - skip processes that disappeared while we inspected them (https://github.com/ansible-collections/community.general/issues/4151). - - gitlab_group_variable - add missing documentation about GitLab versions that - support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/pull/4038). - - 'gitlab_group_variable - allow setting the same variable name under different environment - scopes. Due to this change, the return value ``group_variable`` differs from - the previous version in check mode.
It was counting ``updated`` values, because - it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/pull/4038). - - ' - - gitlab_group_variable - fix idempotent change behaviour for float and integer - variables (https://github.com/ansible-collections/community.general/pull/4038). - - gitlab_project_variable - ``value`` is not necessary when deleting variables - (https://github.com/ansible-collections/community.general/pull/4150). - - gitlab_runner - make ``project`` and ``owned`` mutually exclusive (https://github.com/ansible-collections/community.general/pull/4136). - - homebrew_cask - fix force install operation (https://github.com/ansible-collections/community.general/issues/3703). - - imc_rest - fixes the module failure due to the usage of ``itertools.izip_longest`` - which is not available in Python 3 (https://github.com/ansible-collections/community.general/issues/4206). - - ini_file - when removing nothing do not report changed (https://github.com/ansible-collections/community.general/issues/4154). - - keycloak_user_federation - creating a user federation while specifying an - ID (that does not exist yet) no longer fail with a 404 Not Found (https://github.com/ansible-collections/community.general/pull/4212). - - keycloak_user_federation - mappers auto-created by keycloak are matched and - merged by their name and no longer create duplicated entries (https://github.com/ansible-collections/community.general/pull/4212). - - mail callback plugin - fix encoding of the name of sender and recipient (https://github.com/ansible-collections/community.general/issues/4060, - https://github.com/ansible-collections/community.general/pull/4061). - - passwordstore lookup plugin - fix error detection for non-English locales - (https://github.com/ansible-collections/community.general/pull/4219). - - passwordstore lookup plugin - prevent returning path names as passwords by - accident (https://github.com/ansible-collections/community.general/issues/4185, - https://github.com/ansible-collections/community.general/pull/4192). - - vdo - fix options error (https://github.com/ansible-collections/community.general/pull/4163). - - yum_versionlock - fix matching of existing entries with names passed to the - module. Match yum and dnf lock format (https://github.com/ansible-collections/community.general/pull/4183). - minor_changes: - - Avoid internal ansible-core module_utils in favor of equivalent public API - available since at least Ansible 2.9. This fixes some instances added since - the last time this was fixed (https://github.com/ansible-collections/community.general/pull/4232). - - ansible_galaxy_install - added option ``no_deps`` to the module (https://github.com/ansible-collections/community.general/issues/4174). - - gitlab_group_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/pull/4038 - and https://github.com/ansible-collections/community.general/issues/4074). - - keycloak_* modules - added connection timeout parameter when calling server - (https://github.com/ansible-collections/community.general/pull/4168). - - linode inventory plugin - add support for caching inventory results (https://github.com/ansible-collections/community.general/pull/4179). - - opentelemetry_plugin - enrich service when using the ``jenkins``, ``hetzner`` - or ``jira`` modules (https://github.com/ansible-collections/community.general/pull/4105). 
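The new ``variables`` parameter called out above (mirroring the one ``gitlab_project_variable`` gained in 4.4.0) takes a list of variable definitions, which is what makes the same name under several environment scopes expressible in a single task. A minimal sketch, assuming a hypothetical GitLab URL, token, group and values:

- name: Set one group variable under two environment scopes (sketch)
  community.general.gitlab_group_variable:
    api_url: https://gitlab.example.com   # hypothetical instance
    api_token: "{{ gitlab_token }}"       # hypothetical vaulted token
    group: my-group                       # hypothetical group path
    purge: false
    variables:
      - name: API_KEY
        value: dev-secret
        environment_scope: development
      - name: API_KEY
        value: prod-secret
        environment_scope: production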
- - pacman - the module has been rewritten and is now much faster when using ``state=latest``. - Operations are now done all packages at once instead of package per package - and the configured output format of ``pacman`` no longer affect the module's - operation. (https://github.com/ansible-collections/community.general/pull/3907, - https://github.com/ansible-collections/community.general/issues/3783, https://github.com/ansible-collections/community.general/issues/4079) - - passwordstore lookup plugin - add configurable ``lock`` and ``locktimeout`` - options to avoid race conditions in itself and in the ``pass`` utility it - calls. By default, the plugin now locks on write operations (https://github.com/ansible-collections/community.general/pull/4194). - - proxmox modules - move common code into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4029). - - proxmox_kvm - added EFI disk support when creating VM with OVMF UEFI BIOS - with new ``efidisk0`` option (https://github.com/ansible-collections/community.general/pull/4106, - https://github.com/ansible-collections/community.general/issues/1638). - - proxmox_kwm - add ``win11`` to ``ostype`` parameter for Windows 11 and Windows - Server 2022 support (https://github.com/ansible-collections/community.general/issues/4023, - https://github.com/ansible-collections/community.general/pull/4191). - release_summary: Regular feature and bugfix release. - fragments: - - 3703-force-install-homebrew-cask.yml - - 3907-pacman-speedup.yml - - 3916-fix-vdo-options-type.yml - - 4.5.0.yml - - 4029-proxmox-refactor.yml - - 4061-fix-mail-recipient-encoding.yml - - 4086-rework_of_gitlab_proyect_variable_over_gitlab_group_variable.yml - - 4105-opentelemetry_plugin-enrich_jira_hetzner_jenkins_services.yaml - - 4106-proxmox-efidisk0-support.yaml - - 4136-gitlab_runner-make-project-owned-mutually-exclusive.yml - - 4150-gitlab-project-variable-absent-fix.yml - - 4151-dconf-catch-psutil-nosuchprocess.yaml - - 4154-ini_file_changed.yml - - 4168-add-keycloak-url-timeout.yml - - 4179-linode-inventory-cache.yaml - - 4183-fix-yum_versionlock.yaml - - 4191-proxmox-add-win11.yml - - 4192-improve-passwordstore-consistency.yml - - 4194-configurable-passwordstore-locking.yml - - 4206-imc-rest-module.yaml - - 4212-fixes-for-keycloak-user-federation.yml - - 4219-passwordstore-locale-fix.yml - - 4232-text-converter-import.yml - - 4240-ansible_galaxy_install-no_deps.yml - modules: - - description: Configure Intel Optane Persistent Memory modules - name: pmem - namespace: storage.pmem - - description: Scaleway private network management - name: scaleway_private_network - namespace: cloud.scaleway - release_date: '2022-02-22' - 4.6.0: - changes: - bugfixes: - - filesize - add support for busybox dd implementation, that is used by default - on Alpine linux (https://github.com/ansible-collections/community.general/pull/4288, - https://github.com/ansible-collections/community.general/issues/4259). - - linode inventory plugin - fix configuration handling relating to inventory - filtering (https://github.com/ansible-collections/community.general/pull/4336). - - mksysb - fixed bug for parameter ``backup_dmapi_fs`` was passing the wrong - CLI argument (https://github.com/ansible-collections/community.general/pull/3295). - - pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312). 
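The pacman rewrite described at the start of this entry resolves all requested packages in one pass instead of package by package, so a plain ``state=latest`` task is the path that benefits most. A minimal sketch with placeholder package names:

- name: Keep a set of packages at their latest version (sketch)
  community.general.pacman:
    name:
      - git
      - vim
    state: latest
    update_cache: true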
- - pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286, - https://github.com/ansible-collections/community.general/issues/4285). - - pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275, - https://github.com/ansible-collections/community.general/issues/4274). - - pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade`` - is specified, also if nothing is done (https://github.com/ansible-collections/community.general/pull/4329). - - pacman - when the ``update_cache`` option is combined with another option - such as ``upgrade``, report ``changed`` based on the actions performed by - the latter option. This was the behavior in community.general 4.4.0 and before. - In community.general 4.5.0, a task combining these options would always report - ``changed`` (https://github.com/ansible-collections/community.general/pull/4318). - - proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]`` - form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349). - - proxmox inventory plugin - fixed the ``description`` field being ignored if - it contained a comma (https://github.com/ansible-collections/community.general/issues/4348). - - proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306). - - proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287). - - terraform - fix ``variable`` handling to allow complex values (https://github.com/ansible-collections/community.general/pull/4281). - deprecated_features: - - 'pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache`` - will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep - the old behavior, add something like ``register: result`` and ``changed_when: - result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329).' - known_issues: - - pacman - ``update_cache`` cannot differentiate between up to date and outdated - package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318). - - pacman - binaries specified in the ``executable`` parameter must support ``--print-format`` - in order to be used by this module. In particular, AUR helper ``yay`` is known - not to currently support it (https://github.com/ansible-collections/community.general/pull/4312). - minor_changes: - - jira - when creating a comment, ``fields`` now is used for additional data - (https://github.com/ansible-collections/community.general/pull/4304). - - ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613). - - mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295). - - nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless`` - (https://github.com/ansible-collections/community.general/pull/4108). - - nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858). - - npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299). 
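The pacman deprecation note above already names the forward-compatible pattern; written out as a task it is simply (a sketch assembled from the wording of the note, relying on ``packages`` now always being returned):

- name: Refresh package lists and upgrade the system
  community.general.pacman:
    update_cache: true
    upgrade: true
  register: result
  changed_when: result.packages | length > 0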
- - pacman - add ``remove_nosave`` parameter to avoid saving modified configuration - files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316, - https://github.com/ansible-collections/community.general/issues/4315). - - pacman - now implements proper change detection for ``update_cache=true``. - Adds ``cache_updated`` return value to when ``update_cache=true`` to report - this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337). - - pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300). - - proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553). - - redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``, - and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207). - - syslog_json - add option to skip logging of ``gather_facts`` playbook tasks; - use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223). - - zypper - add support for ``--clean-deps`` option to remove packages that depend - on a package being removed (https://github.com/ansible-collections/community.general/pull/4195). - release_summary: Regular feature and bugfix release. - fragments: - - 3295-mksysb-revamp.yaml - - 4.6.0.yml - - 4108-nmcli-support-modifcation-without-type-param.yml - - 4192-zypper-add-clean-deps.yml - - 4207-add-redis-tls-support.yml - - 4223-syslog-json-skip-syslog-option.yml - - 4275-pacman-sysupgrade.yml - - 4281-terraform-complex-variables.yml - - 4286-pacman-url-pkgs.yml - - 4287-fix-proxmox-vm-chek.yml - - 4288-fix-4259-support-busybox-dd.yml - - 4299-npm-add-production-with-ci-flag.yml - - 4303-pipx-editable.yml - - 4304-jira-fields-in-comment.yml - - 4306-proxmox-fix-error-on-vm-clone.yml - - 4312-pacman-groups.yml - - 4316-pacman-remove-nosave.yml - - 4318-pacman-restore-old-changed-behavior.yml - - 4330-pacman-packages-update_cache.yml - - 4336-linode-inventory-filtering.yaml - - 4337-pacman-update_cache.yml - - 4349-proxmox-inventory-dict-facts.yml - - 4352-proxmox-inventory-filters.yml - - 4355-ldap-recursive-delete.yml - release_date: '2022-03-15' - 4.6.1: - changes: - bugfixes: - - 'lxd inventory plugin - do not crash if OS and release metadata are not present - - (https://github.com/ansible-collections/community.general/pull/4351). - - ' - - terraform - revert bugfix https://github.com/ansible-collections/community.general/pull/4281 - that tried to fix ``variable`` handling to allow complex values. It turned - out that this was breaking several valid use-cases (https://github.com/ansible-collections/community.general/issues/4367, - https://github.com/ansible-collections/community.general/pull/4370). - release_summary: Extraordinary bugfix release to fix a breaking change in ``terraform``. 
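The ``editable`` and ``pip_args`` option names above come straight from this changelog entry; a sketch of ``editable`` against a hypothetical local checkout (``source`` is assumed here to be the module's existing install-spec option):

- name: Install a local project with pipx in editable mode (sketch)
  community.general.pipx:
    name: mytool              # hypothetical application name
    source: /opt/src/mytool   # hypothetical checkout path
    editable: true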
- fragments: - - 4.6.1.yml - - 4351-inventory-lxd-handling_metadata_wo_os_and_release.yml - - 4368-reverts-4281.yml - release_date: '2022-03-16' diff --git a/ansible_collections/community/general/changelogs/config.yaml b/ansible_collections/community/general/changelogs/config.yaml deleted file mode 100644 index fd0b422a..00000000 --- a/ansible_collections/community/general/changelogs/config.yaml +++ /dev/null @@ -1,29 +0,0 @@ -changelog_filename_template: ../CHANGELOG.rst -changelog_filename_version_depth: 0 -changes_file: changelog.yaml -changes_format: combined -keep_fragments: false -mention_ancestor: true -flatmap: true -new_plugins_after_name: removed_features -notesdir: fragments -prelude_section_name: release_summary -prelude_section_title: Release Summary -sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues -title: Community General diff --git a/ansible_collections/community/general/docs/docsite/extra-docs.yml b/ansible_collections/community/general/docs/docsite/extra-docs.yml deleted file mode 100644 index 83f533ec..00000000 --- a/ansible_collections/community/general/docs/docsite/extra-docs.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -sections: - - title: Guides - toctree: - - filter_guide - - test_guide diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml deleted file mode 100644 index 69227fbe..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml +++ /dev/null @@ -1,13 +0,0 @@ -list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true - -list2: - - name: foo - path: /foo - - name: baz - path: /baz diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml deleted file mode 100644 index 7d8a7cf6..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml +++ /dev/null @@ -1,19 +0,0 @@ -list1: - - name: myname01 - param01: - x: default_value - y: default_value - list: - - default_value - - name: myname02 - param01: [1, 1, 2, 3] - -list2: - - name: myname01 - param01: - y: patch_value - z: patch_value - list: - - patch_value - - name: myname02 - param01: [3, 4, 4, {key: value}] diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml deleted file mode 100644 index d1cbb4b3..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 1. 
Merge two lists by common attribute 'name' - include_vars: - dir: example-001_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-001.out diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml deleted file mode 120000 index 7ea8984a..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml +++ /dev/null @@ -1 +0,0 @@ -../default-common.yml \ No newline at end of file diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml deleted file mode 100644 index 4ecfb0a6..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml +++ /dev/null @@ -1,2 +0,0 @@ -list3: "{{ list1| - community.general.lists_mergeby(list2, 'name') }}" diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml deleted file mode 100644 index d21441a8..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 2. Merge two lists by common attribute 'name' - include_vars: - dir: example-002_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-002.out diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml deleted file mode 120000 index 7ea8984a..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml +++ /dev/null @@ -1 +0,0 @@ -../default-common.yml \ No newline at end of file diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml deleted file mode 100644 index 9eb6775f..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml +++ /dev/null @@ -1,2 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name') }}" diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml deleted file mode 100644 index 76922786..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 3. 
Merge recursive by 'name', replace lists (default) - include_vars: - dir: example-003_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-003.out diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml deleted file mode 100644 index 6d6bf8a4..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml +++ /dev/null @@ -1,3 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true) }}" diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml deleted file mode 100644 index 8a473a73..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 4. Merge recursive by 'name', keep lists - include_vars: - dir: example-004_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-004.out diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml deleted file mode 100644 index a525ae4f..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='keep') }}" diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml deleted file mode 100644 index 8bdf92c3..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 5. 
Merge recursive by 'name', append lists - include_vars: - dir: example-005_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-005.out diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml deleted file mode 100644 index 65068610..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='append') }}" diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml deleted file mode 100644 index 9dcb9b68..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 6. Merge recursive by 'name', prepend lists - include_vars: - dir: example-006_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-006.out diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml deleted file mode 100644 index d880dfa9..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='prepend') }}" diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml deleted file mode 100644 index e1a6f2c7..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 7. 
Merge recursive by 'name', append lists 'remove present' - include_vars: - dir: example-007_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-007.out diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml deleted file mode 100644 index af71d6df..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='append_rp') }}" diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml deleted file mode 100644 index 18a59886..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 8. Merge recursive by 'name', prepend lists 'remove present' - include_vars: - dir: example-008_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-008.out diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml deleted file mode 100644 index 8a205785..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='prepend_rp') }}" diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 deleted file mode 100644 index 014ff2d1..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 +++ /dev/null @@ -1,8 +0,0 @@ -{% for i in examples %} -{{ i.label }} - -.. 
code-block:: {{ i.lang }} - - {{ lookup('file', i.file)|indent(2) }} - -{% endfor %} diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2 b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2 deleted file mode 100644 index 764ce3bd..00000000 --- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2 +++ /dev/null @@ -1,2 +0,0 @@ -list3: -{{ list3|to_nice_yaml(indent=0) }} diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst deleted file mode 100644 index bab223d3..00000000 --- a/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst +++ /dev/null @@ -1,19 +0,0 @@ - -.. _ansible_collections.community.general.docsite.filter_guide: - -community.general Filter Guide -============================== - -The :ref:`community.general collection ` offers several useful filter plugins. - -.. toctree:: - :maxdepth: 2 - - filter_guide_paths - filter_guide_abstract_informations - filter_guide_working_with_times - filter_guide_working_with_versions - filter_guide_creating_identifiers - filter_guide_conversions - filter_guide_selecting_json_data - filter_guide_working_with_unicode diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst deleted file mode 100644 index 04fb49bd..00000000 --- a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst +++ /dev/null @@ -1,10 +0,0 @@ -Abstract transformations ------------------------- - -.. toctree:: - :maxdepth: 1 - - filter_guide_abstract_informations_dictionaries - filter_guide_abstract_informations_grouping - filter_guide_abstract_informations_merging_lists_of_dictionaries - filter_guide_abstract_informations_counting_elements_in_sequence diff --git a/ansible_collections/community/general/meta/runtime.yml b/ansible_collections/community/general/meta/runtime.yml deleted file mode 100644 index f5931666..00000000 --- a/ansible_collections/community/general/meta/runtime.yml +++ /dev/null @@ -1,629 +0,0 @@ ---- -requires_ansible: '>=2.9.10' -plugin_routing: - connection: - docker: - redirect: community.docker.docker - oc: - redirect: community.okd.oc - lookup: - gcp_storage_file: - redirect: community.google.gcp_storage_file - hashi_vault: - redirect: community.hashi_vault.hashi_vault - nios: - redirect: infoblox.nios_modules.nios_lookup - nios_next_ip: - redirect: infoblox.nios_modules.nios_next_ip - nios_next_network: - redirect: infoblox.nios_modules.nios_next_network - modules: - ali_instance_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.ali_instance_info instead. - docker_compose: - redirect: community.docker.docker_compose - docker_config: - redirect: community.docker.docker_config - docker_container: - redirect: community.docker.docker_container - docker_container_info: - redirect: community.docker.docker_container_info - docker_host_info: - redirect: community.docker.docker_host_info - docker_image: - redirect: community.docker.docker_image - docker_image_facts: - tombstone: - removal_version: 2.0.0 - warning_text: Use community.docker.docker_image_info instead. 
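For reference, the ``lists_mergeby`` helper files deleted above all reduce to variants of a single expression; the three shapes, taken directly from the removed ``list3.yml`` files, are (``list_merge`` also accepts ``keep``, ``prepend``, ``append_rp`` and ``prepend_rp``, and per the changelog the two options need ansible-base 2.10 or newer):

list3: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}"

list3: "{{ [list1, list2] | community.general.lists_mergeby('name') }}"

list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append') }}"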
- docker_image_info: - redirect: community.docker.docker_image_info - docker_login: - redirect: community.docker.docker_login - docker_network: - redirect: community.docker.docker_network - docker_network_info: - redirect: community.docker.docker_network_info - docker_node: - redirect: community.docker.docker_node - docker_node_info: - redirect: community.docker.docker_node_info - docker_prune: - redirect: community.docker.docker_prune - docker_secret: - redirect: community.docker.docker_secret - docker_service: - tombstone: - removal_version: 2.0.0 - warning_text: Use community.docker.docker_compose instead. - docker_stack: - redirect: community.docker.docker_stack - docker_stack_info: - redirect: community.docker.docker_stack_info - docker_stack_task_info: - redirect: community.docker.docker_stack_task_info - docker_swarm: - redirect: community.docker.docker_swarm - docker_swarm_info: - redirect: community.docker.docker_swarm_info - docker_swarm_service: - redirect: community.docker.docker_swarm_service - docker_swarm_service_info: - redirect: community.docker.docker_swarm_service_info - docker_volume: - redirect: community.docker.docker_volume - docker_volume_info: - redirect: community.docker.docker_volume_info - foreman: - tombstone: - removal_version: 2.0.0 - warning_text: Use the modules from the theforeman.foreman collection instead. - gc_storage: - redirect: community.google.gc_storage - gcdns_record: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_dns_resource_record_set instead. - gcdns_zone: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_dns_managed_zone instead. - gce: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_instance instead. - gce_eip: - redirect: community.google.gce_eip - gce_img: - redirect: community.google.gce_img - gce_instance_template: - redirect: community.google.gce_instance_template - gce_labels: - redirect: community.google.gce_labels - gce_lb: - redirect: community.google.gce_lb - gce_mig: - redirect: community.google.gce_mig - gce_net: - redirect: community.google.gce_net - gce_pd: - redirect: community.google.gce_pd - gce_snapshot: - redirect: community.google.gce_snapshot - gce_tag: - redirect: community.google.gce_tag - gcp_backend_service: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_backend_service instead. - gcp_forwarding_rule: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule - instead. - gcp_healthcheck: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check - or google.cloud.gcp_compute_https_health_check instead. - gcp_target_proxy: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_target_http_proxy instead. - gcp_url_map: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_url_map instead. - gcpubsub: - redirect: community.google.gcpubsub - gcpubsub_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.google.gcpubsub_info instead. - gcpubsub_info: - redirect: community.google.gcpubsub_info - gcspanner: - tombstone: - removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance - instead. 
- github_hooks: - tombstone: - removal_version: 2.0.0 - warning_text: Use community.general.github_webhook and community.general.github_webhook_info - instead. - hetzner_failover_ip: - redirect: community.hrobot.failover_ip - hetzner_failover_ip_info: - redirect: community.hrobot.failover_ip_info - hetzner_firewall: - redirect: community.hrobot.firewall - hetzner_firewall_info: - redirect: community.hrobot.firewall_info - hpilo_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.hpilo_info instead. - idrac_firmware: - redirect: dellemc.openmanage.idrac_firmware - idrac_redfish_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.idrac_redfish_info instead. - idrac_server_config_profile: - redirect: dellemc.openmanage.idrac_server_config_profile - jenkins_job_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.jenkins_job_info instead. - katello: - tombstone: - removal_version: 2.0.0 - warning_text: Use the modules from the theforeman.foreman collection instead. - kubevirt_cdi_upload: - redirect: community.kubevirt.kubevirt_cdi_upload - kubevirt_preset: - redirect: community.kubevirt.kubevirt_preset - kubevirt_pvc: - redirect: community.kubevirt.kubevirt_pvc - kubevirt_rs: - redirect: community.kubevirt.kubevirt_rs - kubevirt_template: - redirect: community.kubevirt.kubevirt_template - kubevirt_vm: - redirect: community.kubevirt.kubevirt_vm - ldap_attr: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.ldap_attrs instead. - logicmonitor: - tombstone: - removal_version: 1.0.0 - warning_text: The logicmonitor_facts module is no longer maintained and the - API used has been disabled in 2017. - logicmonitor_facts: - tombstone: - removal_version: 1.0.0 - warning_text: The logicmonitor_facts module is no longer maintained and the - API used has been disabled in 2017. - memset_memstore_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.memset_memstore_info instead. - memset_server_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.memset_server_info instead. - na_cdot_aggregate: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.ontap.na_ontap_aggregate instead. - na_cdot_license: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.ontap.na_ontap_license instead. - na_cdot_lun: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.ontap.na_ontap_lun instead. - na_cdot_qtree: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.ontap.na_ontap_qtree instead. - na_cdot_svm: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.ontap.na_ontap_svm instead. - na_cdot_user: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.ontap.na_ontap_user instead. - na_cdot_user_role: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.ontap.na_ontap_user_role instead. - na_cdot_volume: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.ontap.na_ontap_volume instead. - na_ontap_gather_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use netapp.ontap.na_ontap_info instead. - nginx_status_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.nginx_status_info instead. 
- nios_a_record: - redirect: infoblox.nios_modules.nios_a_record - nios_aaaa_record: - redirect: infoblox.nios_modules.nios_aaaa_record - nios_cname_record: - redirect: infoblox.nios_modules.nios_cname_record - nios_dns_view: - redirect: infoblox.nios_modules.nios_dns_view - nios_fixed_address: - redirect: infoblox.nios_modules.nios_fixed_address - nios_host_record: - redirect: infoblox.nios_modules.nios_host_record - nios_member: - redirect: infoblox.nios_modules.nios_member - nios_mx_record: - redirect: infoblox.nios_modules.nios_mx_record - nios_naptr_record: - redirect: infoblox.nios_modules.nios_naptr_record - nios_network: - redirect: infoblox.nios_modules.nios_network - nios_network_view: - redirect: infoblox.nios_modules.nios_network_view - nios_nsgroup: - redirect: infoblox.nios_modules.nios_nsgroup - nios_ptr_record: - redirect: infoblox.nios_modules.nios_ptr_record - nios_srv_record: - redirect: infoblox.nios_modules.nios_srv_record - nios_txt_record: - redirect: infoblox.nios_modules.nios_txt_record - nios_zone: - redirect: infoblox.nios_modules.nios_zone - ome_device_info: - redirect: dellemc.openmanage.ome_device_info - one_image_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.one_image_info instead. - onepassword_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.onepassword_info instead. - oneview_datacenter_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.oneview_datacenter_info instead. - oneview_enclosure_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.oneview_enclosure_info instead. - oneview_ethernet_network_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.oneview_ethernet_network_info instead. - oneview_fc_network_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.oneview_fc_network_info instead. - oneview_fcoe_network_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.oneview_fcoe_network_info instead. - oneview_logical_interconnect_group_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.oneview_logical_interconnect_group_info - instead. - oneview_network_set_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.oneview_network_set_info instead. - oneview_san_manager_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.oneview_san_manager_info instead. - online_server_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.online_server_info instead. - online_user_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.online_user_info instead. - ovirt: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_vm instead. - ovirt_affinity_label_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead. - ovirt_api_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_api_info instead. - ovirt_cluster_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_cluster_info instead. - ovirt_datacenter_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead. - ovirt_disk_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_disk_info instead. 
- ovirt_event_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_event_info instead. - ovirt_external_provider_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead. - ovirt_group_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_group_info instead. - ovirt_host_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_host_info instead. - ovirt_host_storage_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead. - ovirt_network_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_network_info instead. - ovirt_nic_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_nic_info instead. - ovirt_permission_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_permission_info instead. - ovirt_quota_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_quota_info instead. - ovirt_scheduling_policy_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead. - ovirt_snapshot_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead. - ovirt_storage_domain_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead. - ovirt_storage_template_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead. - ovirt_storage_vm_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead. - ovirt_tag_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_tag_info instead. - ovirt_template_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_template_info instead. - ovirt_user_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_user_info instead. - ovirt_vm_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_vm_info instead. - ovirt_vmpool_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead. 
- postgresql_copy: - redirect: community.postgresql.postgresql_copy - postgresql_db: - redirect: community.postgresql.postgresql_db - postgresql_ext: - redirect: community.postgresql.postgresql_ext - postgresql_idx: - redirect: community.postgresql.postgresql_idx - postgresql_info: - redirect: community.postgresql.postgresql_info - postgresql_lang: - redirect: community.postgresql.postgresql_lang - postgresql_membership: - redirect: community.postgresql.postgresql_membership - postgresql_owner: - redirect: community.postgresql.postgresql_owner - postgresql_pg_hba: - redirect: community.postgresql.postgresql_pg_hba - postgresql_ping: - redirect: community.postgresql.postgresql_ping - postgresql_privs: - redirect: community.postgresql.postgresql_privs - postgresql_publication: - redirect: community.postgresql.postgresql_publication - postgresql_query: - redirect: community.postgresql.postgresql_query - postgresql_schema: - redirect: community.postgresql.postgresql_schema - postgresql_sequence: - redirect: community.postgresql.postgresql_sequence - postgresql_set: - redirect: community.postgresql.postgresql_set - postgresql_slot: - redirect: community.postgresql.postgresql_slot - postgresql_subscription: - redirect: community.postgresql.postgresql_subscription - postgresql_table: - redirect: community.postgresql.postgresql_table - postgresql_tablespace: - redirect: community.postgresql.postgresql_tablespace - postgresql_user: - redirect: community.postgresql.postgresql_user - postgresql_user_obj_stat_info: - redirect: community.postgresql.postgresql_user_obj_stat_info - purefa_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use purestorage.flasharray.purefa_info instead. - purefb_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use purestorage.flashblade.purefb_info instead. - python_requirements_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.python_requirements_info instead. - redfish_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.redfish_info instead. - scaleway_image_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.scaleway_image_info instead. - scaleway_ip_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.scaleway_ip_info instead. - scaleway_organization_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.scaleway_organization_info instead. - scaleway_security_group_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.scaleway_security_group_info instead. - scaleway_server_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.scaleway_server_info instead. - scaleway_snapshot_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.scaleway_snapshot_info instead. - scaleway_volume_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.scaleway_volume_info instead. - sf_account_manager: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.elementsw.na_elementsw_account instead. - sf_check_connections: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.elementsw.na_elementsw_check_connections instead. - sf_snapshot_schedule_manager: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.elementsw.na_elementsw_snapshot_schedule instead. 
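Redirects such as the ``postgresql_*`` block above keep the old FQCNs resolving, but pointing playbooks at the target collection is the durable fix; a sketch with a hypothetical database name:

- name: Old routed name, still works via the redirect
  community.general.postgresql_db:
    name: appdb

- name: Same task addressed at the collection it now lives in
  community.postgresql.postgresql_db:
    name: appdb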
- sf_volume_access_group_manager: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.elementsw.na_elementsw_access_group instead. - sf_volume_manager: - tombstone: - removal_version: 2.0.0 - warning_text: Use netapp.elementsw.na_elementsw_volume instead. - smartos_image_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.smartos_image_info instead. - vertica_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.vertica_info instead. - xenserver_guest_facts: - tombstone: - removal_version: 3.0.0 - warning_text: Use community.general.xenserver_guest_info instead. - doc_fragments: - _gcp: - redirect: community.google._gcp - docker: - redirect: community.docker.docker - hetzner: - redirect: community.hrobot.robot - kubevirt_common_options: - redirect: community.kubevirt.kubevirt_common_options - kubevirt_vm_options: - redirect: community.kubevirt.kubevirt_vm_options - nios: - redirect: infoblox.nios_modules.nios - postgresql: - redirect: community.postgresql.postgresql - module_utils: - docker.common: - redirect: community.docker.common - docker.swarm: - redirect: community.docker.swarm - gcdns: - redirect: community.google.gcdns - gce: - redirect: community.google.gce - gcp: - redirect: community.google.gcp - hetzner: - redirect: community.hrobot.robot - kubevirt: - redirect: community.kubevirt.kubevirt - net_tools.nios.api: - redirect: infoblox.nios_modules.api - postgresql: - redirect: community.postgresql.postgresql - remote_management.dellemc.dellemc_idrac: - redirect: dellemc.openmanage.dellemc_idrac - remote_management.dellemc.ome: - redirect: dellemc.openmanage.ome - callback: - actionable: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' and 'display_ok_hosts = no' options. - full_skip: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' option. - stderr: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_failed_stderr - = yes' option. - inventory: - docker_machine: - redirect: community.docker.docker_machine - docker_swarm: - redirect: community.docker.docker_swarm - kubevirt: - redirect: community.kubevirt.kubevirt - filter: - path_join: - # The ansible.builtin.path_join filter has been added in ansible-base 2.10. - # Since plugin routing is only available since ansible-base 2.10, this - # redirect will be used for ansible-base 2.10 or later, and the included - # path_join filter will be used for Ansible 2.9 or earlier. 
- redirect: ansible.builtin.path_join diff --git a/ansible_collections/community/general/plugins/action/iptables_state.py b/ansible_collections/community/general/plugins/action/iptables_state.py deleted file mode 120000 index 864608d5..00000000 --- a/ansible_collections/community/general/plugins/action/iptables_state.py +++ /dev/null @@ -1 +0,0 @@ -system/iptables_state.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/action/shutdown.py b/ansible_collections/community/general/plugins/action/shutdown.py deleted file mode 120000 index 503b1ec0..00000000 --- a/ansible_collections/community/general/plugins/action/shutdown.py +++ /dev/null @@ -1 +0,0 @@ -system/shutdown.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/action/system/iptables_state.py b/ansible_collections/community/general/plugins/action/system/iptables_state.py deleted file mode 100644 index b8ae1a5d..00000000 --- a/ansible_collections/community/general/plugins/action/system/iptables_state.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, quidame -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import time - -from ansible.plugins.action import ActionBase -from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure -from ansible.utils.vars import merge_hash -from ansible.utils.display import Display - -display = Display() - - -class ActionModule(ActionBase): - - # Keep internal params away from user interactions - _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait')) - DEFAULT_SUDOABLE = True - - MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = ( - "This module doesn't support async>0 and poll>0 when its 'state' param " - "is set to 'restored'. To enable its rollback feature (that needs the " - "module to run asynchronously on the remote), please set task attribute " - "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") - MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = ( - "Attempts to restore iptables state without rollback in case of mistake " - "may lead the ansible controller to loose access to the hosts and never " - "regain it before fixing firewall rules through a serial console, or any " - "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and " - "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) " - "(recommended).") - MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = ( - "You attempt to restore iptables state with rollback in case of mistake, " - "but with settings that will lead this rollback to happen AFTER that the " - "controller will reach its own timeout. Please set task attribute 'poll' " - "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") - - def _async_result(self, async_status_args, task_vars, timeout): - ''' - Retrieve results of the asynchonous task, and display them in place of - the async wrapper results (those with the ansible_job_id key). 
- ''' - async_status = self._task.copy() - async_status.args = async_status_args - async_status.action = 'ansible.builtin.async_status' - async_status.async_val = 0 - async_action = self._shared_loader_obj.action_loader.get( - async_status.action, task=async_status, connection=self._connection, - play_context=self._play_context, loader=self._loader, templar=self._templar, - shared_loader_obj=self._shared_loader_obj) - - if async_status.args['mode'] == 'cleanup': - return async_action.run(task_vars=task_vars) - - # At least one iteration is required, even if timeout is 0. - for dummy in range(max(1, timeout)): - async_result = async_action.run(task_vars=task_vars) - if async_result.get('finished', 0) == 1: - break - time.sleep(min(1, timeout)) - - return async_result - - def run(self, tmp=None, task_vars=None): - - self._supports_check_mode = True - self._supports_async = True - - result = super(ActionModule, self).run(tmp, task_vars) - del tmp # tmp no longer has any effect - - if not result.get('skipped'): - - # FUTURE: better to let _execute_module calculate this internally? - wrap_async = self._task.async_val and not self._connection.has_native_async - - # Set short names for values we'll have to compare or reuse - task_poll = self._task.poll - task_async = self._task.async_val - check_mode = self._play_context.check_mode - max_timeout = self._connection._play_context.timeout - module_args = self._task.args - - if module_args.get('state', None) == 'restored': - if not wrap_async: - if not check_mode: - display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % ( - task_poll, - task_async, - max_timeout)) - elif task_poll: - raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % ( - task_poll, - task_async, - max_timeout)) - else: - if task_async > max_timeout and not check_mode: - display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % ( - task_poll, - task_async, - max_timeout)) - - # inject the async directory based on the shell option into the - # module args - async_dir = self.get_shell_option('async_dir', default="~/.ansible_async") - - # Bind the loop max duration to consistent values on both - # remote and local sides (if not the same, make the loop - # longer on the controller); and set a backup file path. - module_args['_timeout'] = task_async - module_args['_back'] = '%s/iptables.state' % async_dir - async_status_args = dict(mode='status') - confirm_cmd = 'rm -f %s' % module_args['_back'] - starter_cmd = 'touch %s.starter' % module_args['_back'] - remaining_time = max(task_async, max_timeout) - - # do work! - result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async)) - - # Then the 3-steps "go ahead or rollback": - # 1. Catch early errors of the module (in asynchronous task) if any. - # Touch a file on the target to signal the module to process now. - # 2. Reset connection to ensure a persistent one will not be reused. - # 3. Confirm the restored state by removing the backup on the remote. - # Retrieve the results of the asynchronous task to return them. - if '_back' in module_args: - async_status_args['jid'] = result.get('ansible_job_id', None) - if async_status_args['jid'] is None: - raise AnsibleActionFail("Unable to get 'ansible_job_id'.") - - # Catch early errors due to missing mandatory option, bad - # option type/value, missing required system command, etc. 
- result = merge_hash(result, self._async_result(async_status_args, task_vars, 0)) - - # The module is aware to not process the main iptables-restore - # command before finding (and deleting) the 'starter' cookie on - # the host, so the previous query will not reach ssh timeout. - dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE) - - # As the main command is not yet executed on the target, here - # 'finished' means 'failed before main command be executed'. - if not result['finished']: - try: - self._connection.reset() - except AttributeError: - pass - - for dummy in range(max_timeout): - time.sleep(1) - remaining_time -= 1 - # - AnsibleConnectionFailure covers rejected requests (i.e. - # by rules with '--jump REJECT') - # - ansible_timeout is able to cover dropped requests (due - # to a rule or policy DROP) if not lower than async_val. - try: - dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE) - break - except AnsibleConnectionFailure: - continue - - result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time)) - - # Cleanup async related stuff and internal params - for key in ('ansible_job_id', 'results_file', 'started', 'finished'): - if result.get(key): - del result[key] - - if result.get('invocation', {}).get('module_args'): - for key in ('_back', '_timeout', '_async_dir', 'jid'): - if result['invocation']['module_args'].get(key): - del result['invocation']['module_args'][key] - - async_status_args['mode'] = 'cleanup' - dummy = self._async_result(async_status_args, task_vars, 0) - - if not wrap_async: - # remove a temporary path we created - self._remove_tmp_path(self._connection._shell.tmpdir) - - return result diff --git a/ansible_collections/community/general/plugins/action/system/shutdown.py b/ansible_collections/community/general/plugins/action/system/shutdown.py deleted file mode 100644 index 19813b08..00000000 --- a/ansible_collections/community/general/plugins/action/system/shutdown.py +++ /dev/null @@ -1,212 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Amin Vakil -# Copyright: (c) 2016-2018, Matt Davis -# Copyright: (c) 2018, Sam Doran -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleError, AnsibleConnectionFailure -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.common.collections import is_string -from ansible.plugins.action import ActionBase -from ansible.utils.display import Display - -display = Display() - - -class TimedOutException(Exception): - pass - - -class ActionModule(ActionBase): - TRANSFERS_FILES = False - _VALID_ARGS = frozenset(( - 'msg', - 'delay', - 'search_paths' - )) - - DEFAULT_CONNECT_TIMEOUT = None - DEFAULT_PRE_SHUTDOWN_DELAY = 0 - DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible' - DEFAULT_SHUTDOWN_COMMAND = 'shutdown' - DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"' - DEFAULT_SUDOABLE = True - - SHUTDOWN_COMMANDS = { - 'alpine': 'poweroff', - 'vmkernel': 'halt', - } - - SHUTDOWN_COMMAND_ARGS = { - 'alpine': '', - 'void': '-h +{delay_min} "{message}"', - 'freebsd': '-h +{delay_sec}s "{message}"', - 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS, - 'macosx': '-h +{delay_min} "{message}"', - 'openbsd': '-h +{delay_min} "{message}"', - 'solaris': '-y -g {delay_sec} -i 5 "{message}"', - 'sunos': '-y -g 
{delay_sec} -i 5 "{message}"', - 'vmkernel': '-d {delay_sec}', - 'aix': '-Fh', - } - - def __init__(self, *args, **kwargs): - super(ActionModule, self).__init__(*args, **kwargs) - - @property - def delay(self): - return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY) - - def _check_delay(self, key, default): - """Ensure that the value is positive or zero""" - value = int(self._task.args.get(key, default)) - if value < 0: - value = 0 - return value - - def _get_value_from_facts(self, variable_name, distribution, default_value): - """Get dist+version specific args first, then distribution, then family, lastly use default""" - attr = getattr(self, variable_name) - value = attr.get( - distribution['name'] + distribution['version'], - attr.get( - distribution['name'], - attr.get( - distribution['family'], - getattr(self, default_value)))) - return value - - def get_shutdown_command_args(self, distribution): - args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') - # Convert seconds to minutes. If less that 60, set it to 0. - delay_sec = self.delay - shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) - return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) - - def get_distribution(self, task_vars): - # FIXME: only execute the module if we don't already have the facts we need - distribution = {} - display.debug('{action}: running setup module to get distribution'.format(action=self._task.action)) - module_output = self._execute_module( - task_vars=task_vars, - module_name='ansible.legacy.setup', - module_args={'gather_subset': 'min'}) - try: - if module_output.get('failed', False): - raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format( - to_native(module_output['module_stdout']).strip(), - to_native(module_output['module_stderr']).strip())) - distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower() - distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) - distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower()) - display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution)) - return distribution - except KeyError as ke: - raise AnsibleError('Failed to get distribution information. 
Missing "{0}" in output.'.format(ke.args[0])) - - def get_shutdown_command(self, task_vars, distribution): - shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND') - default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] - search_paths = self._task.args.get('search_paths', default_search_paths) - - # FIXME: switch all this to user arg spec validation methods when they are available - # Convert bare strings to a list - if is_string(search_paths): - search_paths = [search_paths] - - # Error if we didn't get a list - err_msg = "'search_paths' must be a string or flat list of strings, got {0}" - try: - incorrect_type = any(not is_string(x) for x in search_paths) - if not isinstance(search_paths, list) or incorrect_type: - raise TypeError - except TypeError: - raise AnsibleError(err_msg.format(search_paths)) - - display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format( - action=self._task.action, - command=shutdown_bin, - paths=search_paths)) - find_result = self._execute_module( - task_vars=task_vars, - # prevent collection search by calling with ansible.legacy (still allows library/ override of find) - module_name='ansible.legacy.find', - module_args={ - 'paths': search_paths, - 'patterns': [shutdown_bin], - 'file_type': 'any' - } - ) - - full_path = [x['path'] for x in find_result['files']] - if not full_path: - raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths)) - self._shutdown_command = full_path[0] - return self._shutdown_command - - def perform_shutdown(self, task_vars, distribution): - result = {} - shutdown_result = {} - shutdown_command = self.get_shutdown_command(task_vars, distribution) - shutdown_command_args = self.get_shutdown_command_args(distribution) - shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args) - - self.cleanup(force=True) - try: - display.vvv("{action}: shutting down server...".format(action=self._task.action)) - display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec)) - if self._play_context.check_mode: - shutdown_result['rc'] = 0 - else: - shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE) - except AnsibleConnectionFailure as e: - # If the connection is closed too quickly due to the system being shutdown, carry on - display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e))) - shutdown_result['rc'] = 0 - - if shutdown_result['rc'] != 0: - result['failed'] = True - result['shutdown'] = False - result['msg'] = "Shutdown command failed. 
Error was {stdout}, {stderr}".format( - stdout=to_native(shutdown_result['stdout'].strip()), - stderr=to_native(shutdown_result['stderr'].strip())) - return result - - result['failed'] = False - result['shutdown_command'] = shutdown_command_exec - return result - - def run(self, tmp=None, task_vars=None): - self._supports_check_mode = True - self._supports_async = True - - # If running with local connection, fail so we don't shut down ourselves - if self._connection.transport == 'local' and (not self._play_context.check_mode): - msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action) - return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg} - - if task_vars is None: - task_vars = {} - - result = super(ActionModule, self).run(tmp, task_vars) - - if result.get('skipped', False) or result.get('failed', False): - return result - - distribution = self.get_distribution(task_vars) - - # Initiate shutdown - shutdown_result = self.perform_shutdown(task_vars, distribution) - - if shutdown_result['failed']: - result = shutdown_result - return result - - result['shutdown'] = True - result['changed'] = True - result['shutdown_command'] = shutdown_result['shutdown_command'] - - return result diff --git a/ansible_collections/community/general/plugins/become/machinectl.py b/ansible_collections/community/general/plugins/become/machinectl.py deleted file mode 100644 index aebb0891..00000000 --- a/ansible_collections/community/general/plugins/become/machinectl.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: machinectl - short_description: Systemd's machinectl privilege escalation - description: - - This become plugin allows your remote/login user to execute commands as another user via the machinectl utility.
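# [editor's note] The end product of this become plugin (full source below) is a wrapped
# command line; a sketch of that composition using the plugin's own
# '%s -q shell %s %s@ %s' template. The helper name is illustrative only.
def build_machinectl_command(cmd, exe='machinectl', flags='', user=''):
    if not cmd:          # nothing to wrap
        return cmd
    return '%s -q shell %s %s@ %s' % (exe, flags, user, cmd)

print(build_machinectl_command('whoami', user='alice'))
# -> "machinectl -q shell  alice@ whoami" (double space because flags are empty)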
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: machinectl_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_machinectl_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_MACHINECTL_USER - become_exe: - description: Machinectl executable - default: machinectl - ini: - - section: privilege_escalation - key: become_exe - - section: machinectl_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_machinectl_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_MACHINECTL_EXE - become_flags: - description: Options to pass to machinectl - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: machinectl_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_machinectl_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_MACHINECTL_FLAGS - become_pass: - description: Password for machinectl - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_machinectl_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_MACHINECTL_PASS - ini: - - section: machinectl_become_plugin - key: password -''' - -from ansible.plugins.become import BecomeBase - - -class BecomeModule(BecomeBase): - - name = 'community.general.machinectl' - - def build_become_command(self, cmd, shell): - super(BecomeModule, self).build_become_command(cmd, shell) - - if not cmd: - return cmd - - become = self.get_option('become_exe') - - flags = self.get_option('become_flags') - user = self.get_option('become_user') - return '%s -q shell %s %s@ %s' % (become, flags, user, cmd) diff --git a/ansible_collections/community/general/plugins/cache/redis.py b/ansible_collections/community/general/plugins/cache/redis.py deleted file mode 100644 index 3c73d8b5..00000000 --- a/ansible_collections/community/general/plugins/cache/redis.py +++ /dev/null @@ -1,243 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2014, Brian Coca, Josh Drake, et al -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: redis - short_description: Use Redis DB for cache - description: - - This cache uses JSON formatted, per host records saved in Redis. - requirements: - - redis>=2.4.5 (python lib) - options: - _uri: - description: - - A colon separated string of connection information for Redis. - - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme). - - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme). - - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. - required: True - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _keyset_name: - description: User defined name for cache keyset name. 
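# [editor's note] The C(host:port:db:password) grammar documented above, exercised with
# the same regular expression the CacheModule below compiles; a sketch only, the real
# parsing happens in _parse_connection.
import re

re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')

for uri in ('localhost:6379:0:changeme', '[::1]:6379:0'):
    host, port, db, password = re_url_conn.match(uri).groups()
    print(host, port, db, password)
# -> localhost 6379 0 changeme
# -> [::1] 6379 0 None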
- default: ansible_cache_keys - env: - - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME - ini: - - key: fact_caching_redis_keyset_name - section: defaults - version_added: 1.3.0 - _sentinel_service_name: - description: The redis sentinel service name (or referenced as cluster name). - env: - - name: ANSIBLE_CACHE_REDIS_SENTINEL - ini: - - key: fact_caching_redis_sentinel - section: defaults - version_added: 1.3.0 - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' - -import re -import time -import json - -from ansible import constants as C -from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native -from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder -from ansible.plugins.cache import BaseCacheModule -from ansible.release import __version__ as ansible_base_version -from ansible.utils.display import Display - -try: - from redis import StrictRedis, VERSION - HAS_REDIS = True -except ImportError: - HAS_REDIS = False - -display = Display() - - -class CacheModule(BaseCacheModule): - """ - A caching module backed by redis. - - Keys are maintained in a zset with their score being the timestamp - when they are inserted. This allows for the usage of 'zremrangebyscore' - to expire keys. This mechanism is used or a pattern matched 'scan' for - performance. - """ - _sentinel_service_name = None - re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$') - re_sent_conn = re.compile(r'^(.*):(\d+)$') - - def __init__(self, *args, **kwargs): - uri = '' - - try: - super(CacheModule, self).__init__(*args, **kwargs) - if self.get_option('_uri'): - uri = self.get_option('_uri') - self._timeout = float(self.get_option('_timeout')) - self._prefix = self.get_option('_prefix') - self._keys_set = self.get_option('_keyset_name') - self._sentinel_service_name = self.get_option('_sentinel_service_name') - except KeyError: - # TODO: remove once we no longer support Ansible 2.9 - if not ansible_base_version.startswith('2.9.'): - raise AnsibleError("Do not import CacheModules directly. 
Use ansible.plugins.loader.cache_loader instead.") - if C.CACHE_PLUGIN_CONNECTION: - uri = C.CACHE_PLUGIN_CONNECTION - self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) - self._prefix = C.CACHE_PLUGIN_PREFIX - self._keys_set = 'ansible_cache_keys' - - if not HAS_REDIS: - raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'") - - self._cache = {} - kw = {} - - # tls connection - tlsprefix = 'tls://' - if uri.startswith(tlsprefix): - kw['ssl'] = True - uri = uri[len(tlsprefix):] - - # redis sentinel connection - if self._sentinel_service_name: - self._db = self._get_sentinel_connection(uri, kw) - # normal connection - else: - connection = self._parse_connection(self.re_url_conn, uri) - self._db = StrictRedis(*connection, **kw) - - display.vv('Redis connection: %s' % self._db) - - @staticmethod - def _parse_connection(re_patt, uri): - match = re_patt.match(uri) - if not match: - raise AnsibleError("Unable to parse connection string") - return match.groups() - - def _get_sentinel_connection(self, uri, kw): - """ - get sentinel connection details from _uri - """ - try: - from redis.sentinel import Sentinel - except ImportError: - raise AnsibleError("The 'redis' python module (version 2.9.0 or newer) is required to use redis sentinel.") - - if ';' not in uri: - raise AnsibleError('_uri does not have sentinel syntax.') - - # format: "localhost:26379;localhost2:26379;0:changeme" - connections = uri.split(';') - connection_args = connections.pop(-1) - if len(connection_args) > 0: # hanle if no db nr is given - connection_args = connection_args.split(':') - kw['db'] = connection_args.pop(0) - try: - kw['password'] = connection_args.pop(0) - except IndexError: - pass # password is optional - - sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections] - display.vv('\nUsing redis sentinels: %s' % sentinels) - scon = Sentinel(sentinels, **kw) - try: - return scon.master_for(self._sentinel_service_name, socket_timeout=0.2) - except Exception as exc: - raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc)) - - def _make_key(self, key): - return self._prefix + key - - def get(self, key): - - if key not in self._cache: - value = self._db.get(self._make_key(key)) - # guard against the key not being removed from the zset; - # this could happen in cases where the timeout value is changed - # between invocations - if value is None: - self.delete(key) - raise KeyError - self._cache[key] = json.loads(value, cls=AnsibleJSONDecoder) - - return self._cache.get(key) - - def set(self, key, value): - - value2 = json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4) - if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire' - self._db.setex(self._make_key(key), int(self._timeout), value2) - else: - self._db.set(self._make_key(key), value2) - - if VERSION[0] == 2: - self._db.zadd(self._keys_set, time.time(), key) - else: - self._db.zadd(self._keys_set, {key: time.time()}) - self._cache[key] = value - - def _expire_keys(self): - if self._timeout > 0: - expiry_age = time.time() - self._timeout - self._db.zremrangebyscore(self._keys_set, 0, expiry_age) - - def keys(self): - self._expire_keys() - return self._db.zrange(self._keys_set, 0, -1) - - def contains(self, key): - self._expire_keys() - return (self._db.zrank(self._keys_set, key) is not None) - - def delete(self, key): - if key in self._cache: - del self._cache[key] - self._db.delete(self._make_key(key)) - 
self._db.zrem(self._keys_set, key) - - def flush(self): - for key in list(self.keys()): - self.delete(key) - - def copy(self): - # TODO: there is probably a better way to do this in redis - ret = dict([(k, self.get(k)) for k in self.keys()]) - return ret - - def __getstate__(self): - return dict() - - def __setstate__(self, data): - self.__init__() diff --git a/ansible_collections/community/general/plugins/cache/yaml.py b/ansible_collections/community/general/plugins/cache/yaml.py deleted file mode 100644 index e5062b16..00000000 --- a/ansible_collections/community/general/plugins/cache/yaml.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Brian Coca -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: yaml - short_description: YAML formatted files. - description: - - This cache uses YAML formatted, per host, files saved to the filesystem. - author: Brian Coca (@bcoca) - options: - _uri: - required: True - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' - - -import codecs - -import yaml - -from ansible.parsing.yaml.loader import AnsibleLoader -from ansible.parsing.yaml.dumper import AnsibleDumper -from ansible.plugins.cache import BaseFileCacheModule - - -class CacheModule(BaseFileCacheModule): - """ - A caching module backed by yaml files. - """ - - def _load(self, filepath): - with codecs.open(filepath, 'r', encoding='utf-8') as f: - return AnsibleLoader(f).get_single_data() - - def _dump(self, value, filepath): - with codecs.open(filepath, 'w', encoding='utf-8') as f: - yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False) diff --git a/ansible_collections/community/general/plugins/callback/hipchat.py b/ansible_collections/community/general/plugins/callback/hipchat.py deleted file mode 100644 index c64b892d..00000000 --- a/ansible_collections/community/general/plugins/callback/hipchat.py +++ /dev/null @@ -1,228 +0,0 @@ -# -*- coding: utf-8 -*- -# (C) 2014, Matt Martz -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: hipchat - type: notification - requirements: - - whitelist in configuration. - - prettytable (python lib) - short_description: post task events to hipchat - description: - - This callback plugin sends status updates to a HipChat channel during playbook execution. - - Before 2.4 only environment variables were available for configuring this plugin. - options: - token: - description: HipChat API token for v1 or v2 API. 
- required: True - env: - - name: HIPCHAT_TOKEN - ini: - - section: callback_hipchat - key: token - api_version: - description: HipChat API version, v1 or v2. - required: False - default: v1 - env: - - name: HIPCHAT_API_VERSION - ini: - - section: callback_hipchat - key: api_version - room: - description: HipChat room to post in. - default: ansible - env: - - name: HIPCHAT_ROOM - ini: - - section: callback_hipchat - key: room - from: - description: Name to post as - default: ansible - env: - - name: HIPCHAT_FROM - ini: - - section: callback_hipchat - key: from - notify: - description: Add notify flag to important messages - type: bool - default: True - env: - - name: HIPCHAT_NOTIFY - ini: - - section: callback_hipchat - key: notify - -''' - -import os -import json - -try: - import prettytable - HAS_PRETTYTABLE = True -except ImportError: - HAS_PRETTYTABLE = False - -from ansible.plugins.callback import CallbackBase -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url - - -class CallbackModule(CallbackBase): - """This is an example ansible callback plugin that sends status - updates to a HipChat channel during playbook execution. - """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.hipchat' - CALLBACK_NEEDS_WHITELIST = True - - API_V1_URL = 'https://api.hipchat.com/v1/rooms/message' - API_V2_URL = 'https://api.hipchat.com/v2/' - - def __init__(self): - - super(CallbackModule, self).__init__() - - if not HAS_PRETTYTABLE: - self.disabled = True - self._display.warning('The `prettytable` python module is not installed. ' - 'Disabling the HipChat callback plugin.') - self.printed_playbook = False - self.playbook_name = None - self.play = None - - def set_options(self, task_keys=None, var_options=None, direct=None): - super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) - - self.token = self.get_option('token') - self.api_version = self.get_option('api_version') - self.from_name = self.get_option('from') - self.allow_notify = self.get_option('notify') - self.room = self.get_option('room') - - if self.token is None: - self.disabled = True - self._display.warning('HipChat token could not be loaded. The HipChat ' - 'token can be provided using the `HIPCHAT_TOKEN` ' - 'environment variable.') - - # Pick the request handler. 
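# [editor's note] The two lines that follow this comment bind self.send_msg once, at
# option-parsing time, so callers never re-check the API version. The same dispatch
# pattern in isolation, with hypothetical names:
class Notifier:
    def __init__(self, api_version):
        # bind the right handler once instead of branching on every message
        self.send = self._send_v2 if api_version == 'v2' else self._send_v1

    def _send_v1(self, msg):
        return 'v1:' + msg

    def _send_v2(self, msg):
        return 'v2:' + msg

print(Notifier('v2').send('hello'))   # -> v2:hello
print(Notifier('v1').send('hello'))   # -> v1:hello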
- if self.api_version == 'v2': - self.send_msg = self.send_msg_v2 - else: - self.send_msg = self.send_msg_v1 - - def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'} - - body = {} - body['room_id'] = self.room - body['from'] = self.from_name[:15] # max length is 15 - body['message'] = msg - body['message_format'] = msg_format - body['color'] = color - body['notify'] = self.allow_notify and notify - - data = json.dumps(body) - url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room) - try: - response = open_url(url, data=data, headers=headers, method='POST') - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - params = {} - params['room_id'] = self.room - params['from'] = self.from_name[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['notify'] = int(self.allow_notify and notify) - - url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token)) - try: - response = open_url(url, data=urlencode(params)) - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def v2_playbook_on_play_start(self, play): - """Display Playbook and play start messages""" - - self.play = play - name = play.name - # This block sends information about a playbook when it starts - # The playbook object is not immediately available at - # playbook_on_start so we grab it via the play - # - # Displays info about playbook being started by a person on an - # inventory, as well as Tags, Skip Tags and Limits - if not self.printed_playbook: - self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename)) - host_list = self.play.playbook.inventory.host_list - inventory = os.path.basename(os.path.realpath(host_list)) - self.send_msg("%s: Playbook initiated by %s against %s" % - (self.playbook_name, - self.play.playbook.remote_user, - inventory), notify=True) - self.printed_playbook = True - subset = self.play.playbook.inventory._subset - skip_tags = self.play.playbook.skip_tags - self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" % - (self.playbook_name, - ', '.join(self.play.playbook.only_tags), - ', '.join(skip_tags) if skip_tags else None, - ', '.join(subset) if subset else subset)) - - # This is where we actually say we are starting a play - self.send_msg("%s: Starting play: %s" % - (self.playbook_name, name)) - - def playbook_on_stats(self, stats): - """Display info about playbook statistics""" - hosts = sorted(stats.processed.keys()) - - t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', - 'Failures']) - - failures = False - unreachable = False - - for h in hosts: - s = stats.summarize(h) - - if s['failures'] > 0: - failures = True - if s['unreachable'] > 0: - unreachable = True - - t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', - 'failures']]) - - self.send_msg("%s: Playbook complete" % self.playbook_name, - notify=True) - - if failures or unreachable: - color = 'red' - self.send_msg("%s: Failures detected" % self.playbook_name, - color=color, notify=True) - else: - color = 'green' - - self.send_msg("/code %s:\n%s" 
% (self.playbook_name, t), color=color) diff --git a/ansible_collections/community/general/plugins/callback/jabber.py b/ansible_collections/community/general/plugins/callback/jabber.py deleted file mode 100644 index b535fa95..00000000 --- a/ansible_collections/community/general/plugins/callback/jabber.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2016 maxn nikolaev.makc@gmail.com -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: jabber - type: notification - short_description: post task events to a jabber server - description: - - The chatty part of ChatOps with a Hipchat server as a target - - This callback plugin sends status updates to a HipChat channel during playbook execution. - requirements: - - xmpp (python lib https://github.com/ArchipelProject/xmpppy) - options: - server: - description: connection info to jabber server - required: True - env: - - name: JABBER_SERV - user: - description: Jabber user to authenticate as - required: True - env: - - name: JABBER_USER - password: - description: Password for the user to the jabber server - required: True - env: - - name: JABBER_PASS - to: - description: chat identifier that will receive the message - required: True - env: - - name: JABBER_TO -''' - -import os - -HAS_XMPP = True -try: - import xmpp -except ImportError: - HAS_XMPP = False - -from ansible.plugins.callback import CallbackBase - - -class CallbackModule(CallbackBase): - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.jabber' - CALLBACK_NEEDS_WHITELIST = True - - def __init__(self, display=None): - - super(CallbackModule, self).__init__(display=display) - - if not HAS_XMPP: - self._display.warning("The required python xmpp library (xmpppy) is not installed. 
" - "pip install git+https://github.com/ArchipelProject/xmpppy") - self.disabled = True - - self.serv = os.getenv('JABBER_SERV') - self.j_user = os.getenv('JABBER_USER') - self.j_pass = os.getenv('JABBER_PASS') - self.j_to = os.getenv('JABBER_TO') - - if (self.j_user or self.j_pass or self.serv or self.j_to) is None: - self.disabled = True - self._display.warning('Jabber CallBack wants the JABBER_SERV, JABBER_USER, JABBER_PASS and JABBER_TO environment variables') - - def send_msg(self, msg): - """Send message""" - jid = xmpp.JID(self.j_user) - client = xmpp.Client(self.serv, debug=[]) - client.connect(server=(self.serv, 5222)) - client.auth(jid.getNode(), self.j_pass, resource=jid.getResource()) - message = xmpp.Message(self.j_to, msg) - message.setAttr('type', 'chat') - client.send(message) - client.disconnect() - - def v2_runner_on_ok(self, result): - self._clean_results(result._result, result._task.action) - self.debug = self._dump_results(result._result) - - def v2_playbook_on_task_start(self, task, is_conditional): - self.task = task - - def v2_playbook_on_play_start(self, play): - """Display Playbook and play start messages""" - self.play = play - name = play.name - self.send_msg("Ansible starting play: %s" % (name)) - - def playbook_on_stats(self, stats): - name = self.play - hosts = sorted(stats.processed.keys()) - failures = False - unreachable = False - for h in hosts: - s = stats.summarize(h) - if s['failures'] > 0: - failures = True - if s['unreachable'] > 0: - unreachable = True - - if failures or unreachable: - out = self.debug - self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out)) - else: - out = self.debug - self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out)) diff --git a/ansible_collections/community/general/plugins/callback/logentries.py b/ansible_collections/community/general/plugins/callback/logentries.py deleted file mode 100644 index ad71a6d4..00000000 --- a/ansible_collections/community/general/plugins/callback/logentries.py +++ /dev/null @@ -1,331 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Logentries.com, Jimmy Tang -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logentries - type: notification - short_description: Sends events to Logentries - description: - - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes. - - Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named logentries.ini - - In 2.4 and above you can just put it in the main Ansible configuration file. 
- requirements: - - whitelisting in configuration - - certifi (python library) - - flatdict (python library), if you want to use the 'flatten' option - options: - api: - description: URI to the Logentries API - env: - - name: LOGENTRIES_API - default: data.logentries.com - ini: - - section: callback_logentries - key: api - port: - description: HTTP port to use when connecting to the API - env: - - name: LOGENTRIES_PORT - default: 80 - ini: - - section: callback_logentries - key: port - tls_port: - description: Port to use when connecting to the API when TLS is enabled - env: - - name: LOGENTRIES_TLS_PORT - default: 443 - ini: - - section: callback_logentries - key: tls_port - token: - description: The logentries "TCP token" - env: - - name: LOGENTRIES_ANSIBLE_TOKEN - required: True - ini: - - section: callback_logentries - key: token - use_tls: - description: - - Toggle to decide whether to use TLS to encrypt the communications with the API server - env: - - name: LOGENTRIES_USE_TLS - default: False - type: boolean - ini: - - section: callback_logentries - key: use_tls - flatten: - description: flatten complex data structures into a single dictionary with complex keys - type: boolean - default: False - env: - - name: LOGENTRIES_FLATTEN - ini: - - section: callback_logentries - key: flatten -''' - -EXAMPLES = ''' -examples: > - To enable, add this to your ansible.cfg file in the defaults block - - [defaults] - callback_whitelist = community.general.logentries - - Either set the environment variables - export LOGENTRIES_API=data.logentries.com - export LOGENTRIES_PORT=10000 - export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af - - Or in the main Ansible config file - [callback_logentries] - api = data.logentries.com - port = 10000 - tls_port = 20000 - use_tls = no - token = dd21fc88-f00a-43ff-b977-e3a4233c53af - flatten = False -''' - -import os -import socket -import random -import time -import uuid - -try: - import certifi - HAS_CERTIFI = True -except ImportError: - HAS_CERTIFI = False - -try: - import flatdict - HAS_FLATDICT = True -except ImportError: - HAS_FLATDICT = False - -from ansible.module_utils.common.text.converters import to_bytes, to_text -from ansible.plugins.callback import CallbackBase - -# Todo: -# * Better formatting of output before sending out to logentries data/api nodes. 
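# [editor's note] reopen_connection() in the appender below retries with a doubling
# delay capped at MAX_DELAY, plus up-to-100% random jitter. The schedule on its own,
# using the plugin's constants:
import random

def backoff_delays(min_delay=0.1, max_delay=10, attempts=6):
    root = min_delay
    for _ in range(attempts):
        root = min(root * 2, max_delay)        # double, never past the cap
        yield root + random.uniform(0, root)   # jitter spreads reconnects out

print([round(d, 2) for d in backoff_delays()])
# e.g. [0.31, 0.52, 1.44, 2.15, 4.87, 7.01] -- randomized each run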
- - -class PlainTextSocketAppender(object): - def __init__(self, display, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_PORT=443): - - self.LE_API = LE_API - self.LE_PORT = LE_PORT - self.LE_TLS_PORT = LE_TLS_PORT - self.MIN_DELAY = 0.1 - self.MAX_DELAY = 10 - # Error message displayed when an incorrect Token has been detected - self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n" - # Unicode Line separator character \u2028 - self.LINE_SEP = u'\u2028' - - self._display = display - self._conn = None - - def open_connection(self): - self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self._conn.connect((self.LE_API, self.LE_PORT)) - - def reopen_connection(self): - self.close_connection() - - root_delay = self.MIN_DELAY - while True: - try: - self.open_connection() - return - except Exception as e: - self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e)) - - root_delay *= 2 - if root_delay > self.MAX_DELAY: - root_delay = self.MAX_DELAY - - wait_for = root_delay + random.uniform(0, root_delay) - - try: - self._display.vvvv("sleeping %s before retry" % wait_for) - time.sleep(wait_for) - except KeyboardInterrupt: - raise - - def close_connection(self): - if self._conn is not None: - self._conn.close() - - def put(self, data): - # Replace newlines with Unicode line separator - # for multi-line events - data = to_text(data, errors='surrogate_or_strict') - multiline = data.replace(u'\n', self.LINE_SEP) - multiline += u"\n" - # Send data, reconnect if needed - while True: - try: - self._conn.send(to_bytes(multiline, errors='surrogate_or_strict')) - except socket.error: - self.reopen_connection() - continue - break - - self.close_connection() - - -try: - import ssl - HAS_SSL = True -except ImportError: # for systems without TLS support. - SocketAppender = PlainTextSocketAppender - HAS_SSL = False -else: - - class TLSSocketAppender(PlainTextSocketAppender): - def open_connection(self): - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock = ssl.wrap_socket( - sock=sock, - keyfile=None, - certfile=None, - server_side=False, - cert_reqs=ssl.CERT_REQUIRED, - ssl_version=getattr( - ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1), - ca_certs=certifi.where(), - do_handshake_on_connect=True, - suppress_ragged_eofs=True, ) - sock.connect((self.LE_API, self.LE_TLS_PORT)) - self._conn = sock - - SocketAppender = TLSSocketAppender - - -class CallbackModule(CallbackBase): - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.logentries' - CALLBACK_NEEDS_WHITELIST = True - - def __init__(self): - - # TODO: allow for alternate posting methods (REST/UDP/agent/etc) - super(CallbackModule, self).__init__() - - # verify dependencies - if not HAS_SSL: - self._display.warning("Unable to import ssl module. 
Will send over port 80.") - - if not HAS_CERTIFI: - self.disabled = True - self._display.warning('The `certifi` python module is not installed.\nDisabling the Logentries callback plugin.') - - self.le_jobid = str(uuid.uuid4()) - - # FIXME: make configurable, move to options - self.timeout = 10 - - def set_options(self, task_keys=None, var_options=None, direct=None): - - super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) - - # get options - try: - self.api_url = self.get_option('api') - self.api_port = self.get_option('port') - self.api_tls_port = self.get_option('tls_port') - self.use_tls = self.get_option('use_tls') - self.flatten = self.get_option('flatten') - except KeyError as e: - self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e)) - self.disabled = True - - try: - self.token = self.get_option('token') - except KeyError as e: - self._display.warning('Logentries token was not provided, this is required for this callback to operate, disabling') - self.disabled = True - - if self.flatten and not HAS_FLATDICT: - self.disabled = True - self._display.warning('You have chosen to flatten and the `flatdict` python module is not installed.\nDisabling the Logentries callback plugin.') - - self._initialize_connections() - - def _initialize_connections(self): - - if not self.disabled: - if self.use_tls: - self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port)) - self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port) - else: - self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port)) - self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port) - self._appender.reopen_connection() - - def emit_formatted(self, record): - if self.flatten: - results = flatdict.FlatDict(record) - self.emit(self._dump_results(results)) - else: - self.emit(self._dump_results(record)) - - def emit(self, record): - msg = record.rstrip('\n') - msg = "{0} {1}".format(self.token, msg) - self._appender.put(msg) - self._display.vvvv("Sent event to logentries") - - def _set_info(self, host, res): - return {'le_jobid': self.le_jobid, 'hostname': host, 'results': res} - - def runner_on_ok(self, host, res): - results = self._set_info(host, res) - results['status'] = 'OK' - self.emit_formatted(results) - - def runner_on_failed(self, host, res, ignore_errors=False): - results = self._set_info(host, res) - results['status'] = 'FAILED' - self.emit_formatted(results) - - def runner_on_skipped(self, host, item=None): - results = self._set_info(host, item) - del results['results'] - results['status'] = 'SKIPPED' - self.emit_formatted(results) - - def runner_on_unreachable(self, host, res): - results = self._set_info(host, res) - results['status'] = 'UNREACHABLE' - self.emit_formatted(results) - - def runner_on_async_failed(self, host, res, jid): - results = self._set_info(host, res) - results['jid'] = jid - results['status'] = 'ASYNC_FAILED' - self.emit_formatted(results) - - def v2_playbook_on_play_start(self, play): - results = {} - results['le_jobid'] = self.le_jobid - results['started_by'] = os.getlogin() - if play.name: - results['play'] = play.name - results['hosts'] = play.hosts - self.emit_formatted(results) - - def playbook_on_stats(self, stats): - """ close connection """ - self._appender.close_connection() diff --git a/ansible_collections/community/general/plugins/callback/mail.py 
b/ansible_collections/community/general/plugins/callback/mail.py deleted file mode 100644 index 3805bae5..00000000 --- a/ansible_collections/community/general/plugins/callback/mail.py +++ /dev/null @@ -1,253 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' -name: mail -type: notification -short_description: Sends failure events via email -description: -- This callback will report failures via email. -author: -- Dag Wieers (@dagwieers) -requirements: -- whitelisting in configuration -options: - mta: - description: - - Mail Transfer Agent, server that accepts SMTP. - type: str - env: - - name: SMTPHOST - ini: - - section: callback_mail - key: smtphost - default: localhost - mtaport: - description: - - Mail Transfer Agent Port. - - Port at which server SMTP. - type: int - ini: - - section: callback_mail - key: smtpport - default: 25 - to: - description: - - Mail recipient. - type: list - elements: str - ini: - - section: callback_mail - key: to - default: [root] - sender: - description: - - Mail sender. - - Note that this will be required from community.general 6.0.0 on. - type: str - ini: - - section: callback_mail - key: sender - cc: - description: - - CC'd recipients. - type: list - elements: str - ini: - - section: callback_mail - key: cc - bcc: - description: - - BCC'd recipients. - type: list - elements: str - ini: - - section: callback_mail - key: bcc -''' - -import json -import os -import re -import email.utils -import smtplib - -from ansible.module_utils.six import string_types -from ansible.module_utils.common.text.converters import to_bytes -from ansible.parsing.ajson import AnsibleJSONEncoder -from ansible.plugins.callback import CallbackBase - - -class CallbackModule(CallbackBase): - ''' This Ansible callback plugin mails errors to interested parties. ''' - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.mail' - CALLBACK_NEEDS_WHITELIST = True - - def __init__(self, display=None): - super(CallbackModule, self).__init__(display=display) - self.sender = None - self.to = 'root' - self.smtphost = os.getenv('SMTPHOST', 'localhost') - self.smtpport = 25 - self.cc = None - self.bcc = None - - def set_options(self, task_keys=None, var_options=None, direct=None): - - super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) - - self.sender = self.get_option('sender') - if self.sender is None: - self._display.deprecated( - 'The sender for the mail callback has not been specified. 
This will be an error in the future', - version='6.0.0', collection_name='community.general') - self.to = self.get_option('to') - self.smtphost = self.get_option('mta') - self.smtpport = self.get_option('mtaport') - self.cc = self.get_option('cc') - self.bcc = self.get_option('bcc') - - def mail(self, subject='Ansible error mail', body=None): - if body is None: - body = subject - - smtp = smtplib.SMTP(self.smtphost, port=self.smtpport) - - sender_address = email.utils.parseaddr(self.sender) - if self.to: - to_addresses = email.utils.getaddresses(self.to) - if self.cc: - cc_addresses = email.utils.getaddresses(self.cc) - if self.bcc: - bcc_addresses = email.utils.getaddresses(self.bcc) - - content = 'Date: %s\n' % email.utils.formatdate() - content += 'From: %s\n' % email.utils.formataddr(sender_address) - if self.to: - content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses]) - if self.cc: - content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses]) - content += 'Message-ID: %s\n' % email.utils.make_msgid() - content += 'Subject: %s\n\n' % subject.strip() - content += body - - addresses = to_addresses - if self.cc: - addresses += cc_addresses - if self.bcc: - addresses += bcc_addresses - - if not addresses: - self._display.warning('No receiver has been specified for the mail callback plugin.') - - smtp.sendmail(self.sender, [address for name, address in addresses], to_bytes(content)) - - smtp.quit() - - def subject_msg(self, multiline, failtype, linenr): - return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr]) - - def indent(self, multiline, indent=8): - return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE) - - def body_blob(self, multiline, texttype): - ''' Turn some text output in a well-indented block for sending in a mail body ''' - intro = 'with the following %s:\n\n' % texttype - blob = '' - for line in multiline.strip('\r\n').splitlines(): - blob += '%s\n' % line - return intro + self.indent(blob) + '\n' - - def mail_result(self, result, failtype): - host = result._host.get_name() - if not self.sender: - self.sender = '"Ansible: %s" ' % host - - # Add subject - if self.itembody: - subject = self.itemsubject - elif result._result.get('failed_when_result') is True: - subject = "Failed due to 'failed_when' condition" - elif result._result.get('msg'): - subject = self.subject_msg(result._result['msg'], failtype, 0) - elif result._result.get('stderr'): - subject = self.subject_msg(result._result['stderr'], failtype, -1) - elif result._result.get('stdout'): - subject = self.subject_msg(result._result['stdout'], failtype, -1) - elif result._result.get('exception'): # Unrelated exceptions are added to output :-/ - subject = self.subject_msg(result._result['exception'], failtype, -1) - else: - subject = '%s: %s' % (failtype, result._task.name or result._task.action) - - # Make playbook name visible (e.g. 
in Outlook/Gmail condensed view) - body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name) - if result._task.name: - body += 'Task: %s\n' % result._task.name - body += 'Module: %s\n' % result._task.action - body += 'Host: %s\n' % host - body += '\n' - - # Add task information (as much as possible) - body += 'The following task failed:\n\n' - if 'invocation' in result._result: - body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4))) - elif result._task.name: - body += self.indent('%s (%s)\n' % (result._task.name, result._task.action)) - else: - body += self.indent('%s\n' % result._task.action) - body += '\n' - - # Add item / message - if self.itembody: - body += self.itembody - elif result._result.get('failed_when_result') is True: - body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n' - elif result._result.get('msg'): - body += self.body_blob(result._result['msg'], 'message') - - # Add stdout / stderr / exception / warnings / deprecations - if result._result.get('stdout'): - body += self.body_blob(result._result['stdout'], 'standard output') - if result._result.get('stderr'): - body += self.body_blob(result._result['stderr'], 'error output') - if result._result.get('exception'): # Unrelated exceptions are added to output :-/ - body += self.body_blob(result._result['exception'], 'exception') - if result._result.get('warnings'): - for i in range(len(result._result.get('warnings'))): - body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1)) - if result._result.get('deprecations'): - for i in range(len(result._result.get('deprecations'))): - body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1)) - - body += 'and a complete dump of the error:\n\n' - body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4))) - - self.mail(subject=subject, body=body) - - def v2_playbook_on_start(self, playbook): - self.playbook = playbook - self.itembody = '' - - def v2_runner_on_failed(self, result, ignore_errors=False): - if ignore_errors: - return - - self.mail_result(result, 'Failed') - - def v2_runner_on_unreachable(self, result): - self.mail_result(result, 'Unreachable') - - def v2_runner_on_async_failed(self, result): - self.mail_result(result, 'Async failure') - - def v2_runner_item_on_failed(self, result): - # Pass item information to task failure - self.itemsubject = result._result['msg'] - self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result) diff --git a/ansible_collections/community/general/plugins/callback/null.py b/ansible_collections/community/general/plugins/callback/null.py deleted file mode 100644 index 13ea65b4..00000000 --- a/ansible_collections/community/general/plugins/callback/null.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: 'null' - type: stdout - requirements: - - set as main display callback - short_description: Don't display stuff to screen - description: - - This callback prevents outputing events to screen -''' - -from 
ansible.plugins.callback import CallbackBase - - class CallbackModule(CallbackBase): - - ''' - This callback won't print messages to stdout when new callback events are received. - ''' - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.null' diff --git a/ansible_collections/community/general/plugins/callback/osx_say.py b/ansible_collections/community/general/plugins/callback/osx_say.py deleted file mode 120000 index f080521d..00000000 --- a/ansible_collections/community/general/plugins/callback/osx_say.py +++ /dev/null @@ -1 +0,0 @@ -say.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/callback/say.py b/ansible_collections/community/general/plugins/callback/say.py deleted file mode 100644 index 8d67e433..00000000 --- a/ansible_collections/community/general/plugins/callback/say.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2012, Michael DeHaan, -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: say - type: notification - requirements: - - whitelisting in configuration - - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program - short_description: notify using software speech synthesizer - description: - - This plugin will use the 'say' or 'espeak' program to "speak" about play events. - notes: - - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say). -''' - -import platform -import subprocess -import os - -from ansible.module_utils.common.process import get_bin_path -from ansible.plugins.callback import CallbackBase - - -class CallbackModule(CallbackBase): - """ - makes Ansible much more exciting.
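# [editor's note] __init__ below probes for the 'say' binary first and falls back to
# 'espeak', disabling the plugin when neither exists. The probe in miniature, with
# shutil.which standing in for ansible's get_bin_path:
from shutil import which

def find_synthesizer():
    for candidate in ('say', 'espeak'):
        path = which(candidate)
        if path:
            return path
    return None   # the plugin sets self.disabled = True in this case

print(find_synthesizer())   # path to 'say'/'espeak', or None if neither is installed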
- """ - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.say' - CALLBACK_NEEDS_WHITELIST = True - - def __init__(self): - - super(CallbackModule, self).__init__() - - self.FAILED_VOICE = None - self.REGULAR_VOICE = None - self.HAPPY_VOICE = None - self.LASER_VOICE = None - - try: - self.synthesizer = get_bin_path('say') - if platform.system() != 'Darwin': - # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter - self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system()) - else: - self.FAILED_VOICE = 'Zarvox' - self.REGULAR_VOICE = 'Trinoids' - self.HAPPY_VOICE = 'Cellos' - self.LASER_VOICE = 'Princess' - except ValueError: - try: - self.synthesizer = get_bin_path('espeak') - self.FAILED_VOICE = 'klatt' - self.HAPPY_VOICE = 'f5' - self.LASER_VOICE = 'whisper' - except ValueError: - self.synthesizer = None - - # plugin disable itself if say is not present - # ansible will not call any callback if disabled is set to True - if not self.synthesizer: - self.disabled = True - self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__)) - - def say(self, msg, voice): - cmd = [self.synthesizer, msg] - if voice: - cmd.extend(('-v', voice)) - subprocess.call(cmd) - - def runner_on_failed(self, host, res, ignore_errors=False): - self.say("Failure on host %s" % host, self.FAILED_VOICE) - - def runner_on_ok(self, host, res): - self.say("pew", self.LASER_VOICE) - - def runner_on_skipped(self, host, item=None): - self.say("pew", self.LASER_VOICE) - - def runner_on_unreachable(self, host, res): - self.say("Failure on host %s" % host, self.FAILED_VOICE) - - def runner_on_async_ok(self, host, res, jid): - self.say("pew", self.LASER_VOICE) - - def runner_on_async_failed(self, host, res, jid): - self.say("Failure on host %s" % host, self.FAILED_VOICE) - - def playbook_on_start(self): - self.say("Running Playbook", self.REGULAR_VOICE) - - def playbook_on_notify(self, host, handler): - self.say("pew", self.LASER_VOICE) - - def playbook_on_task_start(self, name, is_conditional): - if not is_conditional: - self.say("Starting task: %s" % name, self.REGULAR_VOICE) - else: - self.say("Notifying task: %s" % name, self.REGULAR_VOICE) - - def playbook_on_setup(self): - self.say("Gathering facts", self.REGULAR_VOICE) - - def playbook_on_play_start(self, name): - self.say("Starting play: %s" % name, self.HAPPY_VOICE) - - def playbook_on_stats(self, stats): - self.say("Play complete", self.HAPPY_VOICE) diff --git a/ansible_collections/community/general/plugins/callback/slack.py b/ansible_collections/community/general/plugins/callback/slack.py deleted file mode 100644 index 5cb402b1..00000000 --- a/ansible_collections/community/general/plugins/callback/slack.py +++ /dev/null @@ -1,252 +0,0 @@ -# -*- coding: utf-8 -*- -# (C) 2014-2015, Matt Martz -# (C) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: slack - type: notification - requirements: - - whitelist in configuration - - prettytable (python library) - short_description: Sends play events to a Slack channel - description: - - This is an ansible callback plugin that sends status updates to a Slack channel during playbook 
execution. - - Before 2.4 only environment variables were available for configuring this plugin - options: - webhook_url: - required: True - description: Slack Webhook URL - env: - - name: SLACK_WEBHOOK_URL - ini: - - section: callback_slack - key: webhook_url - channel: - default: "#ansible" - description: Slack room to post in. - env: - - name: SLACK_CHANNEL - ini: - - section: callback_slack - key: channel - username: - description: Username to post as. - env: - - name: SLACK_USERNAME - default: ansible - ini: - - section: callback_slack - key: username - validate_certs: - description: validate the SSL certificate of the Slack server. (For HTTPS URLs) - env: - - name: SLACK_VALIDATE_CERTS - ini: - - section: callback_slack - key: validate_certs - default: True - type: bool -''' - -import json -import os -import uuid - -from ansible import context -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.urls import open_url -from ansible.plugins.callback import CallbackBase - -try: - import prettytable - HAS_PRETTYTABLE = True -except ImportError: - HAS_PRETTYTABLE = False - - -class CallbackModule(CallbackBase): - """This is an ansible callback plugin that sends status - updates to a Slack channel during playbook execution. - """ - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.slack' - CALLBACK_NEEDS_WHITELIST = True - - def __init__(self, display=None): - - super(CallbackModule, self).__init__(display=display) - - if not HAS_PRETTYTABLE: - self.disabled = True - self._display.warning('The `prettytable` python module is not ' - 'installed. Disabling the Slack callback ' - 'plugin.') - - self.playbook_name = None - - # This is a 6 character identifier provided with each message - # This makes it easier to correlate messages when there are more - # than 1 simultaneous playbooks running - self.guid = uuid.uuid4().hex[:6] - - def set_options(self, task_keys=None, var_options=None, direct=None): - - super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) - - self.webhook_url = self.get_option('webhook_url') - self.channel = self.get_option('channel') - self.username = self.get_option('username') - self.show_invocation = (self._display.verbosity > 1) - self.validate_certs = self.get_option('validate_certs') - - if self.webhook_url is None: - self.disabled = True - self._display.warning('Slack Webhook URL was not provided. 
The ' - 'Slack Webhook URL can be provided using ' - 'the `SLACK_WEBHOOK_URL` environment ' - 'variable.') - - def send_msg(self, attachments): - headers = { - 'Content-type': 'application/json', - } - - payload = { - 'channel': self.channel, - 'username': self.username, - 'attachments': attachments, - 'parse': 'none', - 'icon_url': ('https://cdn2.hubspot.net/hub/330046/' - 'file-449187601-png/ansible_badge.png'), - } - - data = json.dumps(payload) - self._display.debug(data) - self._display.debug(self.webhook_url) - try: - response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs, - headers=headers) - return response.read() - except Exception as e: - self._display.warning(u'Could not submit message to Slack: %s' % - to_text(e)) - - def v2_playbook_on_start(self, playbook): - self.playbook_name = os.path.basename(playbook._file_name) - - title = [ - '*Playbook initiated* (_%s_)' % self.guid - ] - - invocation_items = [] - if context.CLIARGS and self.show_invocation: - tags = context.CLIARGS['tags'] - skip_tags = context.CLIARGS['skip_tags'] - extra_vars = context.CLIARGS['extra_vars'] - subset = context.CLIARGS['subset'] - inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']] - - invocation_items.append('Inventory: %s' % ', '.join(inventory)) - if tags and tags != ['all']: - invocation_items.append('Tags: %s' % ', '.join(tags)) - if skip_tags: - invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags)) - if subset: - invocation_items.append('Limit: %s' % subset) - if extra_vars: - invocation_items.append('Extra Vars: %s' % - ' '.join(extra_vars)) - - title.append('by *%s*' % context.CLIARGS['remote_user']) - - title.append('\n\n*%s*' % self.playbook_name) - msg_items = [' '.join(title)] - if invocation_items: - msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) - - msg = '\n'.join(msg_items) - - attachments = [{ - 'fallback': msg, - 'fields': [ - { - 'value': msg - } - ], - 'color': 'warning', - 'mrkdwn_in': ['text', 'fallback', 'fields'], - }] - - self.send_msg(attachments=attachments) - - def v2_playbook_on_play_start(self, play): - """Display Play start messages""" - - name = play.name or 'Play name not specified (%s)' % play._uuid - msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) - attachments = [ - { - 'fallback': msg, - 'text': msg, - 'color': 'warning', - 'mrkdwn_in': ['text', 'fallback', 'fields'], - } - ] - self.send_msg(attachments=attachments) - - def v2_playbook_on_stats(self, stats): - """Display info about playbook statistics""" - - hosts = sorted(stats.processed.keys()) - - t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', - 'Failures', 'Rescued', 'Ignored']) - - failures = False - unreachable = False - - for h in hosts: - s = stats.summarize(h) - - if s['failures'] > 0: - failures = True - if s['unreachable'] > 0: - unreachable = True - - t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', - 'failures', 'rescued', 'ignored']]) - - attachments = [] - msg_items = [ - '*Playbook Complete* (_%s_)' % self.guid - ] - if failures or unreachable: - color = 'danger' - msg_items.append('\n*Failed!*') - else: - color = 'good' - msg_items.append('\n*Success!*') - - msg_items.append('```\n%s\n```' % t) - - msg = '\n'.join(msg_items) - - attachments.append({ - 'fallback': msg, - 'fields': [ - { - 'value': msg - } - ], - 'color': color, - 'mrkdwn_in': ['text', 'fallback', 'fields'] - }) - - self.send_msg(attachments=attachments) diff --git 
a/ansible_collections/community/general/plugins/callback/yaml.py b/ansible_collections/community/general/plugins/callback/yaml.py deleted file mode 100644 index 59fb3509..00000000 --- a/ansible_collections/community/general/plugins/callback/yaml.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: yaml - type: stdout - short_description: yaml-ized Ansible screen output - description: - - Ansible output that can be quite a bit easier to read than the - default JSON formatting. - extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' - -import yaml -import json -import re -import string -import sys - -from ansible.module_utils.common.text.converters import to_bytes, to_text -from ansible.module_utils.six import string_types -from ansible.parsing.yaml.dumper import AnsibleDumper -from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy -from ansible.plugins.callback.default import CallbackModule as Default - - -# from http://stackoverflow.com/a/15423007/115478 -def should_use_block(value):
 - """Returns true if string should be in block format""" - for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029": - if c in value: - return True - return False - - -class MyDumper(AnsibleDumper): - def represent_scalar(self, tag, value, style=None): - """Uses block style for multi-line strings""" - if style is None: - if should_use_block(value): - style = '|' - # we care more about readability than accuracy, so... - # ...no trailing space - value = value.rstrip() - # ...and non-printable characters - value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0) - # ...tabs prevent blocks from expanding - value = value.expandtabs() - # ...and odd bits of whitespace - value = re.sub(r'[\x0b\x0c\r]', '', value) - # ...as does trailing space - value = re.sub(r' +\n', '\n', value) - else: - style = self.default_style - node = yaml.representer.ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node - - -class CallbackModule(Default): - - """ - Variation of the Default output which uses nicely readable YAML instead - of JSON for printing results. - """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.yaml' - - def __init__(self): - super(CallbackModule, self).__init__() - - def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): - if result.get('_ansible_no_log', False): - return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result")) - - # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
- abridged_result = strip_internal_keys(module_response_deepcopy(result)) - - # remove invocation unless specifically wanting it - if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: - del abridged_result['invocation'] - - # remove diff information from screen output - if self._display.verbosity < 3 and 'diff' in result: - del abridged_result['diff'] - - # remove exception from screen output - if 'exception' in abridged_result: - del abridged_result['exception'] - - dumped = '' - - # put changed and skipped into a header line - if 'changed' in abridged_result: - dumped += 'changed=' + str(abridged_result['changed']).lower() + ' ' - del abridged_result['changed'] - - if 'skipped' in abridged_result: - dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' ' - del abridged_result['skipped'] - - # if we already have stdout, we don't need stdout_lines - if 'stdout' in abridged_result and 'stdout_lines' in abridged_result: - abridged_result['stdout_lines'] = '' - - # if we already have stderr, we don't need stderr_lines - if 'stderr' in abridged_result and 'stderr_lines' in abridged_result: - abridged_result['stderr_lines'] = '' - - if abridged_result: - dumped += '\n' - dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False)) - - # indent by a couple of spaces - dumped = '\n '.join(dumped.split('\n')).rstrip() - return dumped - - def _serialize_diff(self, diff): - return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False)) diff --git a/ansible_collections/community/general/plugins/connection/lxd.py b/ansible_collections/community/general/plugins/connection/lxd.py deleted file mode 100644 index f3b06e6e..00000000 --- a/ansible_collections/community/general/plugins/connection/lxd.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2016 Matt Clay -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Matt Clay (@mattclay) - name: lxd - short_description: Run tasks in lxc containers via lxc CLI - description: - - Run commands or put/fetch files to an existing lxc container using lxc CLI - options: - remote_addr: - description: - - Container identifier. - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_lxd_host - executable: - description: - - shell to use for execution inside container - default: /bin/sh - vars: - - name: ansible_executable - - name: ansible_lxd_executable - remote: - description: - - Name of the LXD remote to use. - default: local - vars: - - name: ansible_lxd_remote - version_added: 2.0.0 - project: - description: - - Name of the LXD project to use. 
- vars: - - name: ansible_lxd_project - version_added: 2.0.0 -''' - -import os -from subprocess import Popen, PIPE - -from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.module_utils.common.process import get_bin_path -from ansible.module_utils.common.text.converters import to_bytes, to_text -from ansible.plugins.connection import ConnectionBase - - -class Connection(ConnectionBase): - """ lxd based connections """ - - transport = 'community.general.lxd' - has_pipelining = True - default_user = 'root' - - def __init__(self, play_context, new_stdin, *args, **kwargs): - super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) - - self._host = self._play_context.remote_addr - try: - self._lxc_cmd = get_bin_path("lxc") - except ValueError: - raise AnsibleError("lxc command not found in PATH") - - if self._play_context.remote_user is not None and self._play_context.remote_user != 'root': - self._display.warning('lxd does not support remote_user, using container default: root') - - def _connect(self): - """connect to lxd (nothing to do here) """ - super(Connection, self)._connect() - - if not self._connected: - self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host) - self._connected = True - - def exec_command(self, cmd, in_data=None, sudoable=True): - """ execute a command on the lxd host """ - super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - - self._display.vvv(u"EXEC {0}".format(cmd), host=self._host) - - local_cmd = [self._lxc_cmd] - if self.get_option("project"): - local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "exec", - "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")), - "--", - self.get_option("executable"), "-c", cmd - ]) - - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] - in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') - - process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) - stdout, stderr = process.communicate(in_data) - - stdout = to_text(stdout) - stderr = to_text(stderr) - - if stderr == "error: Container is not running.\n": - raise AnsibleConnectionFailure("container not running: %s" % self._host) - - if stderr == "error: not found\n": - raise AnsibleConnectionFailure("container not found: %s" % self._host) - - return process.returncode, stdout, stderr - - def put_file(self, in_path, out_path): - """ put a file from local to lxd """ - super(Connection, self).put_file(in_path, out_path) - - self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host) - - if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): - raise AnsibleFileNotFound("input path is not a file: %s" % in_path) - - local_cmd = [self._lxc_cmd] - if self.get_option("project"): - local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "file", "push", - in_path, - "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path) - ]) - - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] - - process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) - process.communicate() - - def fetch_file(self, in_path, out_path): - """ fetch a file from lxd to local """ - super(Connection, self).fetch_file(in_path, out_path) - - self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host) - - local_cmd = [self._lxc_cmd] - if self.get_option("project"): - 
local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "file", "pull", - "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path), - out_path - ]) - - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] - - process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) - process.communicate() - - def close(self): - """ close the connection (nothing to do here) """ - super(Connection, self).close() - - self._connected = False diff --git a/ansible_collections/community/general/plugins/doc_fragments/alicloud.py b/ansible_collections/community/general/plugins/doc_fragments/alicloud.py deleted file mode 100644 index f9c9640b..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/alicloud.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Alicloud only documentation fragment - DOCUMENTATION = r''' -options: - alicloud_access_key: - description: - - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY), - C(ALICLOUD_ACCESS_KEY_ID) will be used instead. - aliases: ['access_key_id', 'access_key'] - type: str - alicloud_secret_key: - description: - - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY), - C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead. - aliases: ['secret_access_key', 'secret_key'] - type: str - alicloud_region: - description: - - The Alibaba Cloud region to use. If not specified then the value of environment variable - C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead. - aliases: ['region', 'region_id'] - required: true - type: str - alicloud_security_token: - description: - - The Alibaba Cloud security token. If not specified then the value of environment variable - C(ALICLOUD_SECURITY_TOKEN) will be used instead. - aliases: ['security_token'] - type: str - alicloud_assume_role: - description: - - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials. - - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name), - I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy) - type: dict - aliases: ['assume_role'] - alicloud_assume_role_arn: - description: - - The Alibaba Cloud role_arn. The ARN of the role to assume. If ARN is set to an empty string, - it does not perform role switching. It supports environment variable ALICLOUD_ASSUME_ROLE_ARN. - ansible will execute with provided credentials. - aliases: ['assume_role_arn'] - type: str - alicloud_assume_role_session_name: - description: - - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, - 'ansible' is passed to the AssumeRole call as session name. It supports environment variable - ALICLOUD_ASSUME_ROLE_SESSION_NAME - aliases: ['assume_role_session_name'] - type: str - alicloud_assume_role_session_expiration: - description: - - The Alibaba Cloud session_expiration. The time after which the established session for assuming - role expires. Valid value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default - value). 
It supports environment variable ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION - aliases: ['assume_role_session_expiration'] - type: int - ecs_role_name: - description: - - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control' - section of the Alibaba Cloud console. - - If you're running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the - metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS - credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding - credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage. - aliases: ['role_name'] - type: str - profile: - description: - - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the - ALICLOUD_PROFILE environment variable. - type: str - shared_credentials_file: - description: - - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE - environment variable. - - If this is not set and a profile is specified, ~/.aliyun/config.json will be used. - type: str -author: - - "He Guimin (@xiaozhu36)" -requirements: - - "python >= 3.6" -notes: - - If parameters are not set within the module, the following - environment variables can be used in decreasing order of precedence - C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID), - C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY), - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID), - C(ALICLOUD_SECURITY_TOKEN), - C(ALICLOUD_ECS_ROLE_NAME), - C(ALICLOUD_SHARED_CREDENTIALS_FILE), - C(ALICLOUD_PROFILE), - C(ALICLOUD_ASSUME_ROLE_ARN), - C(ALICLOUD_ASSUME_ROLE_SESSION_NAME), - C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION), - - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can typically be used to specify the - ALICLOUD region, when required, but this can also be configured in the footmark config file -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py b/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py deleted file mode 100644 index 28489356..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard documentation fragment - DOCUMENTATION = r''' -options: - client_id: - description: - - The OAuth consumer key. - - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. - type: str - client_secret: - description: - - The OAuth consumer secret. - - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. - type: str - user: - description: - - The username. - - If not set the environment variable C(BITBUCKET_USERNAME) will be used. - type: str - version_added: 4.0.0 - password: - description: - - The App password. - - If not set the environment variable C(BITBUCKET_PASSWORD) will be used. - type: str - version_added: 4.0.0 -notes: - - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
- - Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords. - - If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence. -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py deleted file mode 100644 index 02435e25..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2016, Dimension Data -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Authors: -# - Adam Friedman - - -class ModuleDocFragment(object): - - # Dimension Data doc fragment - DOCUMENTATION = r''' - -options: - region: - description: - - The target region. - - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py] - - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html) - - Note that the default value "na" stands for "North America". - - The module prepends 'dd-' to the region choice. - type: str - default: na - mcp_user: - description: - - The username used to authenticate to the CloudControl API. - - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata). - type: str - mcp_password: - description: - - The password used to authenticate to the CloudControl API. - - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). - - Required if I(mcp_user) is specified. - type: str - location: - description: - - The target datacenter. - type: str - required: true - validate_certs: - description: - - If C(false), SSL certificates will not be validated. - - This should only be used on private instances of the CloudControl API that use self-signed certificates. - type: bool - default: yes -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py deleted file mode 100644 index ac3deab1..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2016, Dimension Data -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Authors: -# - Adam Friedman - - -class ModuleDocFragment(object): - - # Dimension Data ("wait-for-completion" parameters) doc fragment - DOCUMENTATION = r''' - -options: - wait: - description: - - Should we wait for the task to complete before moving onto the next. - type: bool - default: no - wait_time: - description: - - The maximum amount of time (in seconds) to wait for the task to complete. - - Only applicable if I(wait=true). - type: int - default: 600 - wait_poll_interval: - description: - - The amount of time (in seconds) to wait between checks for task completion. - - Only applicable if I(wait=true). 
- type: int - default: 2 - ''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/gitlab.py b/ansible_collections/community/general/plugins/doc_fragments/gitlab.py deleted file mode 100644 index 21e4584f..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/gitlab.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard files documentation fragment - DOCUMENTATION = r''' -requirements: - - requests (Python library U(https://pypi.org/project/requests/)) - -options: - api_token: - description: - - GitLab access token with API permissions. - type: str - api_oauth_token: - description: - - GitLab OAuth token for logging in. - type: str - version_added: 4.2.0 - api_job_token: - description: - - GitLab CI job token for logging in. - type: str - version_added: 4.2.0 -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py b/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py deleted file mode 100644 index ad445205..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # HPE 3PAR doc fragment - DOCUMENTATION = ''' -options: - storage_system_ip: - description: - - The storage system IP address. - type: str - required: true - storage_system_password: - description: - - The storage system password. - type: str - required: true - storage_system_username: - description: - - The storage system user name. - type: str - required: true - -requirements: - - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk' - - WSAPI service should be enabled on the 3PAR storage array. -notes: - - check_mode not supported - ''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/influxdb.py b/ansible_collections/community/general/plugins/doc_fragments/influxdb.py deleted file mode 100644 index a31c84cb..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/influxdb.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Parameters for influxdb modules - DOCUMENTATION = r''' -options: - hostname: - description: - - The hostname or IP address on which InfluxDB server is listening. - - Since Ansible 2.5, defaulted to localhost. - type: str - default: localhost - username: - description: - - Username that will be used to authenticate against InfluxDB server. - - Alias C(login_username) added in Ansible 2.5. - type: str - default: root - aliases: [ login_username ] - password: - description: - - Password that will be used to authenticate against InfluxDB server. - - Alias C(login_password) added in Ansible 2.5. 
- type: str - default: root - aliases: [ login_password ] - port: - description: - - The port on which InfluxDB server is listening. - type: int - default: 8086 - path: - description: - - The path on which InfluxDB server is accessible. - - Only available when using python-influxdb >= 5.1.0 - type: str - version_added: '0.2.0' - validate_certs: - description: - - If set to C(no), the SSL certificates will not be validated. - - This should only be set to C(no) on personally controlled sites using self-signed certificates. - type: bool - default: yes - ssl: - description: - - Use https instead of http to connect to InfluxDB server. - type: bool - default: false - timeout: - description: - - Number of seconds Requests will wait for client to establish a connection. - type: int - retries: - description: - - Number of retries client will try before aborting. - - C(0) indicates try until success. - - Only available when using python-influxdb >= 4.1.0 - type: int - default: 3 - use_udp: - description: - - Use UDP to connect to InfluxDB server. - type: bool - default: false - udp_port: - description: - - UDP port to connect to InfluxDB server. - type: int - default: 4444 - proxies: - description: - - HTTP(S) proxy to use for Requests to connect to InfluxDB server. - type: dict -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/ipa.py b/ansible_collections/community/general/plugins/doc_fragments/ipa.py deleted file mode 100644 index 47bcee60..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/ipa.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017-18, Ansible Project -# Copyright: (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Parameters for FreeIPA/IPA modules - DOCUMENTATION = r''' -options: - ipa_port: - description: - - Port of FreeIPA / IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead. - - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. - type: int - default: 443 - ipa_host: - description: - - IP or hostname of IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead. - - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server. - - The relevant entry needed in FreeIPA is the 'ipa-ca' entry. - - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used. - - Environment variable fallback mechanism is added in Ansible 2.5. - type: str - default: ipa.example.com - ipa_user: - description: - - Administrative account used on IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead. - - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. - type: str - default: admin - ipa_pass: - description: - - Password of administrative user.
- - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead. - - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA. - - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server. - - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate. - - If GSSAPI is not available, the usage of 'ipa_pass' is required. - - Environment variable fallback mechanism is added in Ansible 2.5. - type: str - ipa_prot: - description: - - Protocol used by IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead. - - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. - type: str - choices: [ http, https ] - default: https - validate_certs: - description: - - This only applies if C(ipa_prot) is I(https). - - If set to C(no), the SSL certificates will not be validated. - - This should only be set to C(no) on personally controlled sites using self-signed certificates. - type: bool - default: yes - ipa_timeout: - description: - - Specifies idle timeout (in seconds) for the connection. - - For bulk operations, you may want to increase this in order to avoid timeout from IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead. - - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set. - type: int - default: 10 -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/keycloak.py b/ansible_collections/community/general/plugins/doc_fragments/keycloak.py deleted file mode 100644 index fab9a6e8..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/keycloak.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard documentation fragment - DOCUMENTATION = r''' -options: - auth_keycloak_url: - description: - - URL to the Keycloak instance. - type: str - required: true - aliases: - - url - - auth_client_id: - description: - - OpenID Connect I(client_id) to authenticate to the API with. - type: str - default: admin-cli - - auth_realm: - description: - - Keycloak realm name to authenticate to for API access. - type: str - - auth_client_secret: - description: - - Client Secret to use in conjunction with I(auth_client_id) (if required). - type: str - - auth_username: - description: - - Username to authenticate for API access with. - type: str - aliases: - - username - - auth_password: - description: - - Password to authenticate for API access with. - type: str - aliases: - - password - - token: - description: - - Authentication token for Keycloak API. - type: str - version_added: 3.0.0 - - validate_certs: - description: - - Verify TLS certificates (do not disable this in production).
- type: bool - default: yes - - connection_timeout: - description: - - Controls the HTTP connections timeout period (in seconds) to Keycloak API. - type: int - default: 10 - version_added: 4.5.0 -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/ldap.py b/ansible_collections/community/general/plugins/doc_fragments/ldap.py deleted file mode 100644 index 1c9931fb..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/ldap.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Standard LDAP documentation fragment - DOCUMENTATION = r''' -options: - bind_dn: - description: - - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default. - - If this is blank, we'll use an anonymous bind. - type: str - bind_pw: - description: - - The password to use with I(bind_dn). - type: str - dn: - required: true - description: - - The DN of the entry to add or remove. - type: str - referrals_chasing: - choices: [disabled, anonymous] - default: anonymous - type: str - description: - - Set the referrals chasing behavior. - - C(anonymous) follow referrals anonymously. This is the default behavior. - - C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off. - version_added: 2.0.0 - server_uri: - description: - - A URI to the LDAP server. - - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location. - type: str - default: ldapi:/// - start_tls: - description: - - If true, we'll use the START_TLS LDAP extension. - type: bool - default: no - validate_certs: - description: - - If set to C(no), SSL certificates will not be validated. - - This should only be used on sites using self-signed certificates. - type: bool - default: yes - sasl_class: - description: - - The class to use for SASL authentication. - - possible choices are C(external), C(gssapi). - type: str - choices: ['external', 'gssapi'] - default: external - version_added: "2.0.0" -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/manageiq.py b/ansible_collections/community/general/plugins/doc_fragments/manageiq.py deleted file mode 100644 index b610b512..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/manageiq.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Daniel Korn -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard ManageIQ documentation fragment - DOCUMENTATION = r''' -options: - manageiq_connection: - description: - - ManageIQ connection configuration information. - required: false - type: dict - suboptions: - url: - description: - - ManageIQ environment url. C(MIQ_URL) env var if set. otherwise, it is required to pass it. - type: str - required: false - username: - description: - - ManageIQ username. C(MIQ_USERNAME) env var if set. otherwise, required if no token is passed in. - type: str - password: - description: - - ManageIQ password. 
C(MIQ_PASSWORD) env var if set. otherwise, required if no token is passed in. - type: str - token: - description: - - ManageIQ token. C(MIQ_TOKEN) env var if set. otherwise, required if no username or password is passed in. - type: str - validate_certs: - description: - - Whether SSL certificates should be verified for HTTPS requests. defaults to True. - type: bool - default: yes - aliases: [ verify_ssl ] - ca_cert: - description: - - The path to a CA bundle file or directory with certificates. defaults to None. - type: str - aliases: [ ca_bundle_path ] - -requirements: - - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)' -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/oneview.py b/ansible_collections/community/general/plugins/doc_fragments/oneview.py deleted file mode 100644 index 0d385e99..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/oneview.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # OneView doc fragment - DOCUMENTATION = r''' -options: - config: - description: - - Path to a .json configuration file containing the OneView client configuration. - The configuration file is optional and when used should be present in the host running the ansible commands. - If the file path is not provided, the configuration will be loaded from environment variables. - For links to example configuration files or how to use the environment variables verify the notes section. - type: path - api_version: - description: - - OneView API Version. - type: int - image_streamer_hostname: - description: - - IP address or hostname for the HPE Image Streamer REST API. - type: str - hostname: - description: - - IP address or hostname for the appliance. - type: str - username: - description: - - Username for API authentication. - type: str - password: - description: - - Password for API authentication. - type: str - -requirements: - - python >= 2.7.9 - -notes: - - "A sample configuration file for the config parameter can be found at: - U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)" - - "Check how to use environment variables for configuration at: - U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)" - - "Additional Playbooks for the HPE OneView Ansible modules can be found at: - U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)" - - "The OneView API version used will directly affect returned and expected fields in resources. - Information on setting the desired API version and can be found at: - U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)" - ''' - - VALIDATEETAG = r''' -options: - validate_etag: - description: - - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag - for the resource matches the ETag provided in the data. - type: bool - default: yes -''' - - FACTSPARAMS = r''' -options: - params: - description: - - List of params to delimit, filter and sort the list of resources. - - "params allowed: - - C(start): The first item to return, using 0-based indexing. - - C(count): The number of resources to return. 
- - C(filter): A general filter/query string to narrow the list of items returned. - - C(sort): The sort order of the returned data set." - type: dict -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/online.py b/ansible_collections/community/general/plugins/doc_fragments/online.py deleted file mode 100644 index 4ad35bab..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/online.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard documentation fragment - DOCUMENTATION = r''' -options: - api_token: - description: - - Online OAuth token. - type: str - required: true - aliases: [ oauth_token ] - api_url: - description: - - Online API URL - type: str - default: 'https://api.online.net' - aliases: [ base_url ] - api_timeout: - description: - - HTTP timeout to Online API in seconds. - type: int - default: 30 - aliases: [ timeout ] - validate_certs: - description: - - Validate SSL certs of the Online API. - type: bool - default: yes -notes: - - Also see the API documentation on U(https://console.online.net/en/api/) - - If C(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence - C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN) - - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL) - environment variable. -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/opennebula.py b/ansible_collections/community/general/plugins/doc_fragments/opennebula.py deleted file mode 100644 index 08b614a6..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/opennebula.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, www.privaz.io Valletech AB -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # OpenNebula common documentation - DOCUMENTATION = r''' -options: - api_url: - description: - - The ENDPOINT URL of the XMLRPC server. - - If not specified then the value of the ONE_URL environment variable, if any, is used. - type: str - aliases: - - api_endpoint - api_username: - description: - - The name of the user for XMLRPC authentication. - - If not specified then the value of the ONE_USERNAME environment variable, if any, is used. - type: str - api_password: - description: - - The password or token for XMLRPC authentication. - - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used. - type: str - aliases: - - api_token - validate_certs: - description: - - Whether to validate the SSL certificates or not. - - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used. - type: bool - default: yes - wait_timeout: - description: - - Time to wait for the desired state to be reached before timeout, in seconds. 
- type: int - default: 300 -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/openswitch.py b/ansible_collections/community/general/plugins/doc_fragments/openswitch.py deleted file mode 100644 index 7ab7c155..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/openswitch.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Peter Sprygada -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard files documentation fragment - DOCUMENTATION = r''' -options: - host: - description: - - Specifies the DNS host name or address for connecting to the remote - device over the specified transport. The value of host is used as - the destination address for the transport. Note this argument - does not affect the SSH argument. - type: str - port: - description: - - Specifies the port to use when building the connection to the remote - device. This value applies to either I(cli) or I(rest). The port - value will default to the appropriate transport common port if - none is provided in the task. (cli=22, http=80, https=443). Note - this argument does not affect the SSH transport. - type: int - default: 0 (use common port) - username: - description: - - Configures the username to use to authenticate the connection to - the remote device. This value is used to authenticate - either the CLI login or the eAPI authentication depending on which - transport is used. Note this argument does not affect the SSH - transport. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_USERNAME) will be used instead. - type: str - password: - description: - - Specifies the password to use to authenticate the connection to - the remote device. This is a common argument used for either I(cli) - or I(rest) transports. Note this argument does not affect the SSH - transport. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. - type: str - timeout: - description: - - Specifies the timeout in seconds for communicating with the network device - for either connecting or sending commands. If the timeout is - exceeded before the operation is completed, the module will error. - type: int - default: 10 - ssh_keyfile: - description: - - Specifies the SSH key to use to authenticate the connection to - the remote device. This argument is only used for the I(cli) - transports. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. - type: path - transport: - description: - - Configures the transport connection to use when connecting to the - remote device. The transport argument supports connectivity to the - device over ssh, cli or REST. - required: true - type: str - choices: [ cli, rest, ssh ] - default: ssh - use_ssl: - description: - - Configures the I(transport) to use SSL if set to C(yes) only when the - I(transport) argument is configured as rest. If the transport - argument is not I(rest), this value is ignored. - type: bool - default: yes - provider: - description: - - Convenience method that allows all I(openswitch) arguments to be passed as - a dict object. All constraints (required, choices, etc) must be - met either by individual arguments or values in this dict. 
- type: dict -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle.py b/ansible_collections/community/general/plugins/doc_fragments/oracle.py deleted file mode 100644 index 94999c04..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/oracle.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - DOCUMENTATION = """ - requirements: - - "python >= 2.7" - - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) - notes: - - For OCI python sdk configuration, please refer to - U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html) - options: - config_file_location: - description: - - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable, - if any, is used. Otherwise, defaults to ~/.oci/config. - type: str - config_profile_name: - description: - - The profile to load from the config file referenced by C(config_file_location). If not set, then the - value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the - "DEFAULT" profile in C(config_file_location). - default: "DEFAULT" - type: str - api_user: - description: - - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the - value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user - is not specified through a configuration file (See C(config_file_location)). To get the user's OCID, - please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_fingerprint: - description: - - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT - environment variable, if any, is used. This option is required if the key fingerprint is not - specified through a configuration file (See C(config_file_location)). To get the key pair's - fingerprint value please refer - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_key_file: - description: - - Full path and filename of the private key (in PEM format). If not set, then the value of the - OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is - not specified through a configuration file (See C(config_file_location)). If the key is encrypted - with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided. - type: path - api_user_key_pass_phrase: - description: - - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then - the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the - key passphrase is not specified through a configuration file (See C(config_file_location)). - type: str - auth_type: - description: - - The type of authentication to use for making API requests. By default C(auth_type="api_key") based - authentication is performed and the API key (see I(api_user_key_file)) in your config file will be - used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE, - if any, is used. 
Use C(auth_type="instance_principal") to use instance principal based authentication - when running ansible playbooks within an OCI compute instance. - choices: ['api_key', 'instance_principal'] - default: 'api_key' - type: str - tenancy: - description: - - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is - used. This option is required if the tenancy OCID is not specified through a configuration file - (See C(config_file_location)). To get the tenancy OCID, please refer - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm) - type: str - region: - description: - - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the - value of the OCI_REGION variable, if any, is used. This option is required if the region is - not specified through a configuration file (See C(config_file_location)). Please refer to - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information - on OCI regions. - type: str - """ diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py deleted file mode 100644 index ff70d45d..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - display_name: - description: Use I(display_name) along with the other options to return only resources that match the given - display name exactly. - type: str - """ diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py deleted file mode 100644 index 8c4f9c1e..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - name: - description: Use I(name) along with the other options to return only resources that match the given name - exactly. - type: str - """ diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py deleted file mode 100644 index 0312755f..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - wait: - description: Whether to wait for create or delete operation to complete. 
- default: yes - type: bool - wait_timeout: - description: Time, in seconds, to wait when I(wait=yes). - default: 1200 - type: int - wait_until: - description: The lifecycle state to wait for the resource to transition into when I(wait=yes). By default, - when I(wait=yes), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/ - RUNNING applicable lifecycle state during create operation & to get into DELETED/DETACHED/ - TERMINATED lifecycle state during delete operation. - type: str - """ diff --git a/ansible_collections/community/general/plugins/doc_fragments/proxmox.py b/ansible_collections/community/general/plugins/doc_fragments/proxmox.py deleted file mode 100644 index 165a7852..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/proxmox.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Common parameters for Proxmox VE modules - DOCUMENTATION = r''' -options: - api_host: - description: - - Specify the target host of the Proxmox VE cluster. - type: str - required: true - api_user: - description: - - Specify the user to authenticate with. - type: str - required: true - api_password: - description: - - Specify the password to authenticate with. - - You can use C(PROXMOX_PASSWORD) environment variable. - type: str - api_token_id: - description: - - Specify the token ID. - type: str - version_added: 1.3.0 - api_token_secret: - description: - - Specify the token secret. - type: str - version_added: 1.3.0 - validate_certs: - description: - - If C(no), SSL certificates will not be validated. - - This should only be used on personally controlled sites using self-signed certificates. - type: bool - default: no -requirements: [ "proxmoxer", "requests" ] -''' - - SELECTION = r''' -options: - vmid: - description: - - Specifies the instance ID. - - If not set the next available ID will be fetched from ProxmoxAPI. - type: int - node: - description: - - Proxmox VE node on which to operate. - - Only required for I(state=present). - - For every other states it will be autodiscovered. - type: str - pool: - description: - - Add the new VM to the specified pool. - type: str -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/purestorage.py b/ansible_collections/community/general/plugins/doc_fragments/purestorage.py deleted file mode 100644 index f35f0267..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/purestorage.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Simon Dodsley -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard Pure Storage documentation fragment - DOCUMENTATION = r''' -options: - - See separate platform section for more details -requirements: - - See separate platform section for more details -notes: - - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade -''' - - # Documentation fragment for FlashBlade - FB = r''' -options: - fb_url: - description: - - FlashBlade management IP address or Hostname. - type: str - api_token: - description: - - FlashBlade API token for admin privileged user. 
- type: str -notes: - - This module requires the C(purity_fb) Python library - - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables - if I(fb_url) and I(api_token) arguments are not passed to the module directly -requirements: - - python >= 2.7 - - purity_fb >= 1.1 -''' - - # Documentation fragment for FlashArray - FA = r''' -options: - fa_url: - description: - - FlashArray management IPv4 address or Hostname. - type: str - required: true - api_token: - description: - - FlashArray API token for admin privileged user. - type: str - required: true -notes: - - This module requires the C(purestorage) Python library - - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables - if I(fa_url) and I(api_token) arguments are not passed to the module directly -requirements: - - python >= 2.7 - - purestorage -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/rackspace.py b/ansible_collections/community/general/plugins/doc_fragments/rackspace.py deleted file mode 100644 index 0f57dd88..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/rackspace.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Matt Martz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard Rackspace only documentation fragment - DOCUMENTATION = r''' -options: - api_key: - description: - - Rackspace API key, overrides I(credentials). - type: str - aliases: [ password ] - credentials: - description: - - File to find the Rackspace credentials in. Ignored if I(api_key) and - I(username) are provided. - type: path - aliases: [ creds_file ] - env: - description: - - Environment as configured in I(~/.pyrax.cfg), - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). - type: str - region: - description: - - Region to create an instance in. - type: str - username: - description: - - Rackspace username, overrides I(credentials). - type: str - validate_certs: - description: - - Whether or not to require SSL validation of API endpoints. - type: bool - aliases: [ verify_ssl ] -requirements: - - python >= 2.6 - - pyrax -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) -''' - - # Documentation fragment including attributes to enable communication - # of other OpenStack clouds. Not all rax modules support this. - OPENSTACK = r''' -options: - api_key: - type: str - description: - - Rackspace API key, overrides I(credentials). - aliases: [ password ] - auth_endpoint: - type: str - description: - - The URI of the authentication service. - - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/) - credentials: - type: path - description: - - File to find the Rackspace credentials in. Ignored if I(api_key) and - I(username) are provided. 
- aliases: [ creds_file ] - env: - type: str - description: - - Environment as configured in I(~/.pyrax.cfg), - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). - identity_type: - type: str - description: - - Authentication mechanism to use, such as rackspace or keystone. - default: rackspace - region: - type: str - description: - - Region to create an instance in. - tenant_id: - type: str - description: - - The tenant ID used for authentication. - tenant_name: - type: str - description: - - The tenant name used for authentication. - username: - type: str - description: - - Rackspace username, overrides I(credentials). - validate_certs: - description: - - Whether or not to require SSL validation of API endpoints. - type: bool - aliases: [ verify_ssl ] -requirements: - - python >= 2.6 - - pyrax -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/redis.py b/ansible_collections/community/general/plugins/doc_fragments/redis.py deleted file mode 100644 index e7af25ec..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/redis.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Common parameters for Redis modules - DOCUMENTATION = r''' -options: - login_host: - description: - - Specify the target host running the database. - default: localhost - type: str - login_port: - description: - - Specify the port to connect to. - default: 6379 - type: int - login_user: - description: - - Specify the user to authenticate with. - - Requires L(redis,https://pypi.org/project/redis) >= 3.4.0. - type: str - login_password: - description: - - Specify the password to authenticate with. - - Usually not used when target is localhost. - type: str - tls: - description: - - Specify whether or not to use TLS for the connection. - type: bool - default: true - validate_certs: - description: - - Specify whether or not to validate TLS certificates. - - This should only be turned off for personally controlled sites or with - C(localhost) as target. - type: bool - default: true - ca_certs: - description: - - Path to root certificates file. If not set and I(tls) is - set to C(true), certifi ca-certificates will be used. - type: str -requirements: [ "redis", "certifi" ] - -notes: - - Requires the C(redis) Python package on the remote host. You can - install it with pip (C(pip install redis)) or with a package manager. - Information on the library can be found at U(https://github.com/andymccurdy/redis-py). 
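For orientation, the Redis connection options documented just above line up one-to-one with the redis-py client constructor. A minimal sketch, assuming redis-py >= 3.4.0 and hypothetical credential values (the fragment's ca_certs corresponds to redis-py's ssl_ca_certs):

import redis

# Hypothetical values; each keyword mirrors one option documented above.
client = redis.Redis(
    host='localhost',           # login_host
    port=6379,                  # login_port
    username='admin',           # login_user (needs redis-py >= 3.4.0)
    password='secret',          # login_password
    ssl=True,                   # tls
    ssl_cert_reqs='required',   # validate_certs
    ssl_ca_certs='/etc/ssl/certs/ca-certificates.crt',  # ca_certs
)
client.ping()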
-''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/rundeck.py b/ansible_collections/community/general/plugins/doc_fragments/rundeck.py deleted file mode 100644 index 056a54f3..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/rundeck.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Phillipe Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard files documentation fragment - DOCUMENTATION = r''' -options: - url: - type: str - description: - - Rundeck instance URL. - required: true - api_version: - type: int - description: - - Rundeck API version to be used. - - API version must be at least 14. - default: 39 - api_token: - type: str - description: - - Rundeck User API Token. - required: true -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/scaleway.py b/ansible_collections/community/general/plugins/doc_fragments/scaleway.py deleted file mode 100644 index c1e1b13d..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/scaleway.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard documentation fragment - DOCUMENTATION = r''' -options: - api_token: - description: - - Scaleway OAuth token. - type: str - required: true - aliases: [ oauth_token ] - api_url: - description: - - Scaleway API URL. - type: str - default: https://api.scaleway.com - aliases: [ base_url ] - api_timeout: - description: - - HTTP timeout to Scaleway API in seconds. - type: int - default: 30 - aliases: [ timeout ] - query_parameters: - description: - - List of parameters passed to the query string. - type: dict - default: {} - validate_certs: - description: - - Validate SSL certs of the Scaleway API. - type: bool - default: yes -notes: - - Also see the API documentation on U(https://developer.scaleway.com/) - - If C(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence - C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN). - - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL) - environment variable. -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/vexata.py b/ansible_collections/community/general/plugins/doc_fragments/vexata.py deleted file mode 100644 index d541d5ad..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/vexata.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2019, Sandeep Kasargod -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for Vexata VX100 arrays. 
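The Scaleway fragment above spells out a token lookup order; as a minimal sketch (the helper name is hypothetical), the described precedence reduces to a first-match scan over the environment:

import os

# First variable found wins, in decreasing order of precedence per the
# Scaleway notes above.
def resolve_scaleway_token():
    for var in ('SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN'):
        value = os.environ.get(var)
        if value:
            return value
    return None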
-''' - - # Documentation fragment for Vexata VX100 series - VX100 = r''' -options: - array: - description: - - Vexata VX100 array hostname or IPv4 Address. - required: true - type: str - user: - description: - - Vexata API user with administrative privileges. - required: false - type: str - password: - description: - - Vexata API user password. - required: false - type: str - validate_certs: - description: - - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. - - If set to C(yes), please make sure Python >= 2.7.9 is installed on the given machine. - required: false - type: bool - default: 'no' - -requirements: - - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array - - vexatapi >= 0.0.1 - - python >= 2.7 - - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if - user and password arguments are not passed to the module directly. -''' diff --git a/ansible_collections/community/general/plugins/doc_fragments/xenserver.py b/ansible_collections/community/general/plugins/doc_fragments/xenserver.py deleted file mode 100644 index 747bf02f..00000000 --- a/ansible_collections/community/general/plugins/doc_fragments/xenserver.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Common parameters for XenServer modules - DOCUMENTATION = r''' -options: - hostname: - description: - - The hostname or IP address of the XenServer host or XenServer pool master. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead. - type: str - default: localhost - aliases: [ host, pool ] - username: - description: - - The username to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead. - type: str - default: root - aliases: [ admin, user ] - password: - description: - - The password to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead. - type: str - aliases: [ pass, pwd ] - validate_certs: - description: - - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead. - type: bool - default: yes -''' diff --git a/ansible_collections/community/general/plugins/filter/counter.py b/ansible_collections/community/general/plugins/filter/counter.py deleted file mode 100644 index ad957fce..00000000 --- a/ansible_collections/community/general/plugins/filter/counter.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2021, Remy Keil -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleFilterError -from ansible.module_utils.common._collections_compat import Sequence -from collections import Counter - - -def counter(sequence): - ''' Count elements in a sequence. Returns dict with count result. 
''' - if not isinstance(sequence, Sequence): - raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' % - (sequence, type(sequence))) - - try: - result = dict(Counter(sequence)) - except TypeError as e: - raise AnsibleFilterError( - "community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e) - ) - return result - - -class FilterModule(object): - ''' Ansible counter jinja2 filters ''' - - def filters(self): - filters = { - 'counter': counter, - } - - return filters diff --git a/ansible_collections/community/general/plugins/filter/dict.py b/ansible_collections/community/general/plugins/filter/dict.py deleted file mode 100644 index 3d20e752..00000000 --- a/ansible_collections/community/general/plugins/filter/dict.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -def dict_filter(sequence): - '''Convert a list of tuples to a dictionary. - - Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results in ``{1: 2, 'a': 'b'}`` - ''' - return dict(sequence) - - -class FilterModule(object): - '''Ansible jinja2 filters''' - - def filters(self): - return { - 'dict': dict_filter, - } diff --git a/ansible_collections/community/general/plugins/filter/dict_kv.py b/ansible_collections/community/general/plugins/filter/dict_kv.py deleted file mode 100644 index 7ce6c3e4..00000000 --- a/ansible_collections/community/general/plugins/filter/dict_kv.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -def dict_kv(value, key): - '''Return a dictionary with a single key-value pair - - Example: - - - hosts: localhost - gather_facts: false - vars: - myvar: myvalue - tasks: - - debug: - msg: "{{ myvar | dict_kv('thatsmyvar') }}" - - produces: - - ok: [localhost] => { - "msg": { - "thatsmyvar": "myvalue" - } - } - - Example 2: - - - hosts: localhost - gather_facts: false - vars: - common_config: - type: host - database: all - myservers: - - server1 - - server2 - tasks: - - debug: - msg: "{{ myservers | map('dict_kv', 'server') | map('combine', common_config) }}" - - produces: - - ok: [localhost] => { - "msg": [ - { - "database": "all", - "server": "server1", - "type": "host" - }, - { - "database": "all", - "server": "server2", - "type": "host" - } - ] - } - ''' - return {key: value} - - -class FilterModule(object): - ''' Query filter ''' - - def filters(self): - return { - 'dict_kv': dict_kv - } diff --git a/ansible_collections/community/general/plugins/filter/from_csv.py b/ansible_collections/community/general/plugins/filter/from_csv.py deleted file mode 100644 index b66d4769..00000000 --- a/ansible_collections/community/general/plugins/filter/from_csv.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) -# Copyright: (c) 2018, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -from ansible.errors import 
AnsibleFilterError -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, - DialectNotAvailableError, - CustomDialectFailureError) - - -def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None): - - dialect_params = { - "delimiter": delimiter, - "skipinitialspace": skipinitialspace, - "strict": strict, - } - - try: - dialect = initialize_dialect(dialect, **dialect_params) - except (CustomDialectFailureError, DialectNotAvailableError) as e: - raise AnsibleFilterError(to_native(e)) - - reader = read_csv(data, dialect, fieldnames) - - data_list = [] - - try: - for row in reader: - data_list.append(row) - except CSVError as e: - raise AnsibleFilterError("Unable to process file: %s" % to_native(e)) - - return data_list - - -class FilterModule(object): - - def filters(self): - return { - 'from_csv': from_csv - } diff --git a/ansible_collections/community/general/plugins/filter/groupby.py b/ansible_collections/community/general/plugins/filter/groupby.py deleted file mode 100644 index a2a85aa9..00000000 --- a/ansible_collections/community/general/plugins/filter/groupby.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleFilterError -from ansible.module_utils.common._collections_compat import Mapping, Sequence - - -def groupby_as_dict(sequence, attribute): - ''' - Given a sequence of dictionaries and an attribute name, returns a dictionary mapping - the value of this attribute to the dictionary. - - If multiple dictionaries in the sequence have the same value for this attribute, - the filter will fail. - ''' - if not isinstance(sequence, Sequence): - raise AnsibleFilterError('Input is not a sequence') - - result = dict() - for list_index, element in enumerate(sequence): - if not isinstance(element, Mapping): - raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index)) - if attribute not in element: - raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index)) - result_index = element[attribute] - if result_index in result: - raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index)) - result[result_index] = element - return result - - -class FilterModule(object): - ''' Ansible list filters ''' - - def filters(self): - return { - 'groupby_as_dict': groupby_as_dict, - } diff --git a/ansible_collections/community/general/plugins/filter/jc.py b/ansible_collections/community/general/plugins/filter/jc.py deleted file mode 100644 index f8fc4ac5..00000000 --- a/ansible_collections/community/general/plugins/filter/jc.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Filipe Niero Felisbino -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# contributed by Kelly Brazil - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleError, AnsibleFilterError -import importlib - -try: - import jc - HAS_LIB = True -except ImportError: - HAS_LIB = False - - -def jc(data, parser, quiet=True, raw=False): - """Convert returned command output to JSON using the JC library - - Arguments: - - parser required (string) the correct parser for the input data (e.g. 'ifconfig') - see https://github.com/kellyjonbrazil/jc#parsers for latest list of parsers. - quiet optional (bool) True to suppress warning messages (default is True) - raw optional (bool) True to return pre-processed JSON (default is False) - - Returns: - - dictionary or list of dictionaries - - Example: - - - name: run date command - hosts: ubuntu - tasks: - - shell: date - register: result - - set_fact: - myvar: "{{ result.stdout | community.general.jc('date') }}" - - debug: - msg: "{{ myvar }}" - - produces: - - ok: [192.168.1.239] => { - "msg": { - "day": 9, - "hour": 22, - "minute": 6, - "month": "Aug", - "month_num": 8, - "second": 22, - "timezone": "UTC", - "weekday": "Sun", - "weekday_num": 1, - "year": 2020 - } - } - """ - - if not HAS_LIB: - raise AnsibleError('You need to install "jc" prior to running jc filter') - - try: - jc_parser = importlib.import_module('jc.parsers.' + parser) - return jc_parser.parse(data, quiet=quiet, raw=raw) - - except Exception as e: - raise AnsibleFilterError('Error in jc filter plugin: %s' % e) - - -class FilterModule(object): - ''' Query filter ''' - - def filters(self): - return { - 'jc': jc - } diff --git a/ansible_collections/community/general/plugins/filter/json_query.py b/ansible_collections/community/general/plugins/filter/json_query.py deleted file mode 100644 index 9c835e8c..00000000 --- a/ansible_collections/community/general/plugins/filter/json_query.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Filipe Niero Felisbino -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleError, AnsibleFilterError - -try: - import jmespath - HAS_LIB = True -except ImportError: - HAS_LIB = False - - -def json_query(data, expr): - '''Query data using jmespath query language ( http://jmespath.org ). 
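Underneath, this filter is a thin wrapper around jmespath.search(expression, data); a self-contained sketch with hypothetical data (a simpler expression than the docstring example below):

import jmespath

data = {'users': [{'name': 'alice'}, {'name': 'bob'}]}
# The [*] projection walks the list and collects the selected key: ['alice', 'bob']
print(jmespath.search('users[*].name', data))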
Example: - - ansible.builtin.debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}" - ''' - if not HAS_LIB: - raise AnsibleError('You need to install "jmespath" prior to running ' - 'json_query filter') - - # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence - # See issue: https://github.com/ansible-collections/community.general/issues/320 - jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', ) - jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', ) - jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', ) - try: - return jmespath.search(expr, data) - except jmespath.exceptions.JMESPathError as e: - raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e) - except Exception as e: - # For older jmespath, we can get ValueError and TypeError without much info. - raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e) - - -class FilterModule(object): - ''' Query filter ''' - - def filters(self): - return { - 'json_query': json_query - } diff --git a/ansible_collections/community/general/plugins/filter/list.py b/ansible_collections/community/general/plugins/filter/list.py deleted file mode 100644 index 005e4b7c..00000000 --- a/ansible_collections/community/general/plugins/filter/list.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020-2022, Vladimir Botka -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleFilterError -from ansible.module_utils.six import string_types -from ansible.module_utils.common._collections_compat import Mapping, Sequence -from ansible.utils.vars import merge_hash -from ansible.release import __version__ as ansible_version -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -from collections import defaultdict -from operator import itemgetter - - -def merge_hash_wrapper(x, y, recursive=False, list_merge='replace'): - ''' Wrapper of the function merge_hash from ansible.utils.vars. Only 2 parameters are allowed - for Ansible 2.9 and lower.''' - - if LooseVersion(ansible_version) < LooseVersion('2.10'): - if list_merge != 'replace' or recursive: - msg = ("Non default options of list_merge(default=replace) or recursive(default=False) " - "are not allowed in Ansible version 2.9 or lower. Ansible version is %s, " - "recursive=%s, and list_merge=%s.") - raise AnsibleFilterError(msg % (ansible_version, recursive, list_merge)) - else: - return merge_hash(x, y) - else: - return merge_hash(x, y, recursive, list_merge) - - -def list_mergeby(x, y, index, recursive=False, list_merge='replace'): - ''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used. - This function is used by the function lists_mergeby. - ''' - - d = defaultdict(dict) - for l in (x, y): - for elem in l: - if not isinstance(elem, Mapping): - msg = "Elements of list arguments for lists_mergeby must be dictionaries.
%s is %s" - raise AnsibleFilterError(msg % (elem, type(elem))) - if index in elem.keys(): - d[elem[index]].update(merge_hash_wrapper(d[elem[index]], elem, recursive, list_merge)) - return sorted(d.values(), key=itemgetter(index)) - - -def lists_mergeby(*terms, **kwargs): - ''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge' - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". - - Example: - - debug: - msg: "{{ list1| - community.general.lists_mergeby(list2, - 'index', - recursive=True, - list_merge='append')| - list }}" - ''' - - recursive = kwargs.pop('recursive', False) - list_merge = kwargs.pop('list_merge', 'replace') - if kwargs: - raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments.") - if len(terms) < 2: - raise AnsibleFilterError("At least one list and index are needed.") - - # allow the user to do `[list1, list2, ...] | lists_mergeby('index')` - flat_list = [] - for sublist in terms[:-1]: - if not isinstance(sublist, Sequence): - msg = ("All arguments before the argument index for community.general.lists_mergeby " - "must be lists. %s is %s") - raise AnsibleFilterError(msg % (sublist, type(sublist))) - if len(sublist) > 0: - if all(isinstance(l, Sequence) for l in sublist): - for item in sublist: - flat_list.append(item) - else: - flat_list.append(sublist) - lists = flat_list - - if not lists: - return [] - - if len(lists) == 1: - return lists[0] - - index = terms[-1] - - if not isinstance(index, string_types): - msg = ("First argument after the lists for community.general.lists_mergeby must be string. " - "%s is %s") - raise AnsibleFilterError(msg % (index, type(index))) - - high_to_low_prio_list_iterator = reversed(lists) - result = next(high_to_low_prio_list_iterator) - for list in high_to_low_prio_list_iterator: - result = list_mergeby(list, result, index, recursive, list_merge) - - return result - - -class FilterModule(object): - ''' Ansible list filters ''' - - def filters(self): - return { - 'lists_mergeby': lists_mergeby, - } diff --git a/ansible_collections/community/general/plugins/filter/path_join_shim.py b/ansible_collections/community/general/plugins/filter/path_join_shim.py deleted file mode 100644 index 9734298a..00000000 --- a/ansible_collections/community/general/plugins/filter/path_join_shim.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020-2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -import os.path - - -def path_join(list): - '''Join list of paths. - - This is a minimal shim for ansible.builtin.path_join included in ansible-base 2.10. - This should only be called by Ansible 2.9 or earlier. See meta/runtime.yml for details. 
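Stripped of the merge_hash machinery, the merge-by-key idea in list_mergeby above boils down to a keyed dict.update pass; a self-contained sketch with hypothetical data:

from collections import defaultdict
from operator import itemgetter

list1 = [{'name': 'a', 'x': 1}, {'name': 'b', 'x': 2}]
list2 = [{'name': 'a', 'y': 3}]

merged = defaultdict(dict)
for elem in list1 + list2:
    merged[elem['name']].update(elem)  # later lists win on conflicting keys

# [{'name': 'a', 'x': 1, 'y': 3}, {'name': 'b', 'x': 2}]
print(sorted(merged.values(), key=itemgetter('name')))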
- ''' - return os.path.join(*list) - - -class FilterModule(object): - '''Ansible jinja2 filters''' - - def filters(self): - return { - 'path_join': path_join, - } diff --git a/ansible_collections/community/general/plugins/filter/random_mac.py b/ansible_collections/community/general/plugins/filter/random_mac.py deleted file mode 100644 index 7d25555a..00000000 --- a/ansible_collections/community/general/plugins/filter/random_mac.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020 Ansible Project -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import re -from random import Random, SystemRandom - -from ansible.errors import AnsibleFilterError -from ansible.module_utils.six import string_types - - -def random_mac(value, seed=None): - ''' Takes a string prefix and returns it completed with random bytes - to form a complete 6-byte MAC address ''' - - if not isinstance(value, string_types): - raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' % - (type(value), value)) - - value = value.lower() - mac_items = value.split(':') - - if len(mac_items) > 5: - raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated' - ' items max' % value) - - err = "" - for mac in mac_items: - if not mac: - err += ",empty item" - continue - if not re.match('[a-f0-9]{2}', mac): - err += ",%s not a hex byte" % mac - err = err.strip(',') - - if err: - raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err)) - - if seed is None: - r = SystemRandom() - else: - r = Random(seed) - # Generate a random int between 0x1000000000 and 0xFFFFFFFFFF - v = r.randint(68719476736, 1099511627775) - # Select the first n hex chars needed to complete the input prefix - remain = 2 * (6 - len(mac_items)) - rnd = ('%x' % v)[:remain] - return value + re.sub(r'(..)', r':\1', rnd) - - -class FilterModule: - ''' Ansible jinja2 filters ''' - def filters(self): - return { - 'random_mac': random_mac, - } diff --git a/ansible_collections/community/general/plugins/filter/unicode_normalize.py b/ansible_collections/community/general/plugins/filter/unicode_normalize.py deleted file mode 100644 index 9afbf29e..00000000 --- a/ansible_collections/community/general/plugins/filter/unicode_normalize.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -from unicodedata import normalize - -from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError -from ansible.module_utils.six import text_type - - -def unicode_normalize(data, form='NFC'): - """Applies normalization to 'unicode' strings.
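The padding step in random_mac above is the interesting part; a self-contained sketch of just that core (the function name is hypothetical, and it uses a seeded Random throughout rather than falling back to SystemRandom):

import re
from random import Random

def complete_mac(prefix, seed=None):
    # Draw a 40-bit integer, keep only the hex digits needed to pad the
    # prefix out to six ':'-separated bytes, then reinsert the colons.
    items = prefix.lower().split(':')
    rnd = '%x' % Random(seed).randint(0x1000000000, 0xFFFFFFFFFF)
    remain = 2 * (6 - len(items))
    return prefix.lower() + re.sub(r'(..)', r':\1', rnd[:remain])

print(complete_mac('52:54:00', seed=42))  # deterministic for a fixed seed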
- - Args: - data: A unicode string piped into the Jinja filter - form: One of ('NFC', 'NFD', 'NFKC', 'NFKD'). - See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information. - - Returns: - A normalized unicode string of the specified 'form'. - """ - - if not isinstance(data, text_type): - raise AnsibleFilterTypeError("%s is not a valid input type" % type(data)) - - if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'): - raise AnsibleFilterError("%s is not a valid form" % form) - - return normalize(form, data) - - -class FilterModule(object): - def filters(self): - return { - 'unicode_normalize': unicode_normalize, - } diff --git a/ansible_collections/community/general/plugins/filter/version_sort.py b/ansible_collections/community/general/plugins/filter/version_sort.py deleted file mode 100644 index ac62ef8c..00000000 --- a/ansible_collections/community/general/plugins/filter/version_sort.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2021 Eric Lavarde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -def version_sort(value, reverse=False): - '''Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10''' - return sorted(value, key=LooseVersion, reverse=reverse) - - -class FilterModule(object): - ''' Version sort filter ''' - - def filters(self): - return { - 'version_sort': version_sort - } diff --git a/ansible_collections/community/general/plugins/inventory/cobbler.py b/ansible_collections/community/general/plugins/inventory/cobbler.py deleted file mode 100644 index d50acd0c..00000000 --- a/ansible_collections/community/general/plugins/inventory/cobbler.py +++ /dev/null @@ -1,286 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2020 Orion Poplawski -# Copyright (c) 2020 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Orion Poplawski (@opoplawski) - name: cobbler - short_description: Cobbler inventory source - version_added: 1.0.0 - description: - - Get inventory hosts from the cobbler service. - - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin: cobbler) entry." - extends_documentation_fragment: - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to C(community.general.cobbler) for this plugin to recognize it as its own. - required: yes - choices: [ 'cobbler', 'community.general.cobbler' ] - url: - description: URL to cobbler. - default: 'http://cobbler/cobbler_api' - env: - - name: COBBLER_SERVER - user: - description: Cobbler authentication user. - required: no - env: - - name: COBBLER_USER - password: - description: Cobbler authentication password. - required: no - env: - - name: COBBLER_PASSWORD - cache_fallback: - description: Fallback to cached results if the connection to cobbler fails. - type: boolean - default: no - exclude_profiles: - description: - - Profiles to exclude from inventory. - - Ignored if I(include_profiles) is specified. - type: list - default: [] - elements: str - include_profiles: - description: - - Profiles to include from inventory.
- - If specified, all other profiles will be excluded. - - I(exclude_profiles) is ignored if I(include_profiles) is specified. - type: list - default: [] - elements: str - version_added: 4.4.0 - group_by: - description: Keys to group hosts by - type: list - elements: string - default: [ 'mgmt_classes', 'owners', 'status' ] - group: - description: Group to place all hosts into - default: cobbler - group_prefix: - description: Prefix to apply to cobbler groups - default: cobbler_ - want_facts: - description: Toggle, if C(true) the plugin will retrieve host facts from the server - type: boolean - default: yes -''' - -EXAMPLES = ''' -# my.cobbler.yml -plugin: community.general.cobbler -url: http://cobbler/cobbler_api -user: ansible-tester -password: secure -''' - -import socket - -from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six import iteritems -from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name - -# xmlrpc -try: - import xmlrpclib as xmlrpc_client - HAS_XMLRPC_CLIENT = True -except ImportError: - try: - import xmlrpc.client as xmlrpc_client - HAS_XMLRPC_CLIENT = True - except ImportError: - HAS_XMLRPC_CLIENT = False - - -class InventoryModule(BaseInventoryPlugin, Cacheable): - ''' Host inventory parser for ansible using cobbler as source. ''' - - NAME = 'community.general.cobbler' - - def __init__(self): - super(InventoryModule, self).__init__() - self.cache_key = None - self.connection = None - - def verify_file(self, path): - valid = False - if super(InventoryModule, self).verify_file(path): - if path.endswith(('cobbler.yaml', 'cobbler.yml')): - valid = True - else: - self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"') - return valid - - def _get_connection(self): - if not HAS_XMLRPC_CLIENT: - raise AnsibleError('Could not import xmlrpc client library') - - if self.connection is None: - self.display.vvvv('Connecting to %s\n' % self.cobbler_url) - self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True) - self.token = None - if self.get_option('user') is not None: - self.token = self.connection.login(self.get_option('user'), self.get_option('password')) - return self.connection - - def _init_cache(self): - if self.cache_key not in self._cache: - self._cache[self.cache_key] = {} - - def _reload_cache(self): - if self.get_option('cache_fallback'): - self.display.vvv('Cannot connect to server, loading cache\n') - self._options['cache_timeout'] = 0 - self.load_cache_plugin() - self._cache.get(self.cache_key, {}) - - def _get_profiles(self): - if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}): - c = self._get_connection() - try: - if self.token is not None: - data = c.get_profiles(self.token) - else: - data = c.get_profiles() - except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): - self._reload_cache() - else: - self._init_cache() - self._cache[self.cache_key]['profiles'] = data - - return self._cache[self.cache_key]['profiles'] - - def _get_systems(self): - if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}): - c = self._get_connection() - try: - if self.token is not None: - data = c.get_systems(self.token) - else: - data = c.get_systems() - except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): - self._reload_cache() - else: - self._init_cache() - self._cache[self.cache_key]['systems'] = data - - return 
self._cache[self.cache_key]['systems'] - - def _add_safe_group_name(self, group, child=None): - group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", "")))) - if child is not None: - self.inventory.add_child(group_name, child) - return group_name - - def _exclude_profile(self, profile): - if self.include_profiles: - return profile not in self.include_profiles - else: - return profile in self.exclude_profiles - - def parse(self, inventory, loader, path, cache=True): - - super(InventoryModule, self).parse(inventory, loader, path) - - # read config from file, this sets 'options' - self._read_config_data(path) - - # get connection host - self.cobbler_url = self.get_option('url') - self.cache_key = self.get_cache_key(path) - self.use_cache = cache and self.get_option('cache') - - self.exclude_profiles = self.get_option('exclude_profiles') - self.include_profiles = self.get_option('include_profiles') - self.group_by = self.get_option('group_by') - - for profile in self._get_profiles(): - if profile['parent']: - self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent'])) - if not self._exclude_profile(profile['parent']): - parent_group_name = self._add_safe_group_name(profile['parent']) - self.display.vvvv('Added profile parent group %s\n' % parent_group_name) - if not self._exclude_profile(profile['name']): - group_name = self._add_safe_group_name(profile['name']) - self.display.vvvv('Added profile group %s\n' % group_name) - self.inventory.add_child(parent_group_name, group_name) - else: - self.display.vvvv('Processing profile %s without parent\n' % profile['name']) - # Create a hierarchy of profile names - profile_elements = profile['name'].split('-') - i = 0 - while i < len(profile_elements) - 1: - profile_group = '-'.join(profile_elements[0:i + 1]) - profile_group_child = '-'.join(profile_elements[0:i + 2]) - if self._exclude_profile(profile_group): - self.display.vvvv('Excluding profile %s\n' % profile_group) - break - group_name = self._add_safe_group_name(profile_group) - self.display.vvvv('Added profile group %s\n' % group_name) - child_group_name = self._add_safe_group_name(profile_group_child) - self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name)) - self.inventory.add_child(group_name, child_group_name) - i = i + 1 - - # Add default group for this inventory if specified - self.group = to_safe_group_name(self.get_option('group')) - if self.group is not None and self.group != '': - self.inventory.add_group(self.group) - self.display.vvvv('Added site group %s\n' % self.group) - - for host in self._get_systems(): - # Get the FQDN for the host and add it to the right groups - hostname = host['hostname'] # None - interfaces = host['interfaces'] - - if self._exclude_profile(host['profile']): - self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile'])) - continue - - # hostname is often empty for non-static IP hosts - if hostname == '': - for (iname, ivalue) in iteritems(interfaces): - if ivalue['management'] or not ivalue['static']: - this_dns_name = ivalue.get('dns_name', None) - if this_dns_name is not None and this_dns_name != "": - hostname = this_dns_name - self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname)) - - if hostname == '': - self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name']) - continue - - self.inventory.add_host(hostname) - self.display.vvvv('Added
host %s hostname %s\n' % (host['name'], hostname)) - - # Add host to profile group - group_name = self._add_safe_group_name(host['profile'], child=hostname) - self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name)) - - # Add host to groups specified by group_by fields - for group_by in self.group_by: - if host[group_by] == '<>': - groups = [] - else: - groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by] - for group in groups: - group_name = self._add_safe_group_name(group, child=hostname) - self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name)) - - # Add to group for this inventory - if self.group is not None: - self.inventory.add_child(self.group, hostname) - - # Add host variables - if self.get_option('want_facts'): - try: - self.inventory.set_variable(hostname, 'cobbler', host) - except ValueError as e: - self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e))) diff --git a/ansible_collections/community/general/plugins/inventory/linode.py b/ansible_collections/community/general/plugins/inventory/linode.py deleted file mode 100644 index 33ecc513..00000000 --- a/ansible_collections/community/general/plugins/inventory/linode.py +++ /dev/null @@ -1,324 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' - name: linode - author: - - Luke Murphy (@decentral1se) - short_description: Ansible dynamic inventory plugin for Linode. - requirements: - - python >= 2.7 - - linode_api4 >= 2.0.0 - description: - - Reads inventories from the Linode API v4. - - Uses a YAML configuration file that ends with linode.(yml|yaml). - - Linode labels are used by default as the hostnames. - - The default inventory groups are built from groups (deprecated by - Linode) and not tags. - extends_documentation_fragment: - - constructed - - inventory_cache - options: - cache: - version_added: 4.5.0 - cache_plugin: - version_added: 4.5.0 - cache_timeout: - version_added: 4.5.0 - cache_connection: - version_added: 4.5.0 - cache_prefix: - version_added: 4.5.0 - plugin: - description: Marks this as an instance of the 'linode' plugin. - required: true - choices: ['linode', 'community.general.linode'] - ip_style: - description: Populate hostvars with all information available from the Linode APIv4. - type: string - default: plain - choices: - - plain - - api - version_added: 3.6.0 - access_token: - description: The Linode account personal access token. - required: true - env: - - name: LINODE_ACCESS_TOKEN - regions: - description: Populate inventory with instances in this region. - default: [] - type: list - elements: string - tags: - description: Populate inventory only with instances which have at least one of the tags listed here. - default: [] - type: list - elements: string - version_added: 2.0.0 - types: - description: Populate inventory with instances with this type. - default: [] - type: list - elements: string - strict: - version_added: 2.0.0 - compose: - version_added: 2.0.0 - groups: - version_added: 2.0.0 - keyed_groups: - version_added: 2.0.0 -''' - -EXAMPLES = r''' -# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment. -plugin: community.general.linode - -# You can use Jinja to template the access token. 
-plugin: community.general.linode -access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}" -# For older Ansible versions, you need to write this as: -# access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}" - -# Example with regions, types, groups and access token -plugin: community.general.linode -access_token: foobar -regions: - - eu-west -types: - - g5-standard-2 - -# Example with keyed_groups, groups, and compose -plugin: community.general.linode -access_token: foobar -keyed_groups: - - key: tags - separator: '' - - key: region - prefix: region -groups: - webservers: "'web' in (tags|list)" - mailservers: "'mail' in (tags|list)" -compose: - # By default, Ansible tries to connect to the label of the instance. - # Since that might not be a valid name to connect to, you can - # replace it with the first IPv4 address of the linode as follows: - ansible_ssh_host: ipv4[0] - ansible_port: 2222 - -# Example where control traffic limited to internal network -plugin: community.general.linode -access_token: foobar -ip_style: api -compose: - ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first" -''' - -import os - -from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils.six import string_types -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.template import Templar - - -try: - from linode_api4 import LinodeClient - from linode_api4.objects.linode import Instance - from linode_api4.errors import ApiError as LinodeApiError - HAS_LINODE = True -except ImportError: - HAS_LINODE = False - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - - NAME = 'community.general.linode' - - def _build_client(self, loader): - """Build the Linode client.""" - - t = Templar(loader=loader) - - access_token = self.get_option('access_token') - if t.is_template(access_token): - access_token = t.template(variable=access_token, disable_lookups=False) - - if access_token is None: - try: - access_token = os.environ['LINODE_ACCESS_TOKEN'] - except KeyError: - pass - - if access_token is None: - raise AnsibleError(( - 'Could not retrieve Linode access token ' - 'from plugin configuration or environment' - )) - - self.client = LinodeClient(access_token) - - def _get_instances_inventory(self): - """Retrieve Linode instance information from cloud inventory.""" - try: - self.instances = self.client.linode.instances() - except LinodeApiError as exception: - raise AnsibleError('Linode client raised: %s' % exception) - - def _add_groups(self): - """Add Linode instance groups to the dynamic inventory.""" - self.linode_groups = set( - filter(None, [ - instance.group - for instance - in self.instances - ]) - ) - - for linode_group in self.linode_groups: - self.inventory.add_group(linode_group) - - def _filter_by_config(self): - """Filter instances by user specified configuration.""" - regions = self.get_option('regions') - if regions: - self.instances = [ - instance for instance in self.instances - if instance.region.id in regions - ] - - types = self.get_option('types') - if types: - self.instances = [ - instance for instance in self.instances - if instance.type.id in types - ] - - tags = self.get_option('tags') - if tags: - self.instances = [ - instance for instance in self.instances - if any(tag in instance.tags for tag in tags) - ] - - def _add_instances_to_groups(self): - """Add instance names to their dynamic 
inventory groups.""" - for instance in self.instances: - self.inventory.add_host(instance.label, group=instance.group) - - def _add_hostvars_for_instances(self): - """Add hostvars for instances in the dynamic inventory.""" - ip_style = self.get_option('ip_style') - for instance in self.instances: - hostvars = instance._raw_json - for hostvar_key in hostvars: - if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']: - continue - self.inventory.set_variable( - instance.label, - hostvar_key, - hostvars[hostvar_key] - ) - if ip_style == 'api': - ips = instance.ips.ipv4.public + instance.ips.ipv4.private - ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local] - ips += instance.ips.ipv6.pools - - for ip_type in set(ip.type for ip in ips): - self.inventory.set_variable( - instance.label, - ip_type, - self._ip_data([ip for ip in ips if ip.type == ip_type]) - ) - - def _ip_data(self, ip_list): - data = [] - for ip in list(ip_list): - data.append( - { - 'address': ip.address, - 'subnet_mask': ip.subnet_mask, - 'gateway': ip.gateway, - 'public': ip.public, - 'prefix': ip.prefix, - 'rdns': ip.rdns, - 'type': ip.type - } - ) - return data - - def _cacheable_inventory(self): - return [i._raw_json for i in self.instances] - - def populate(self): - strict = self.get_option('strict') - - self._filter_by_config() - - self._add_groups() - self._add_instances_to_groups() - self._add_hostvars_for_instances() - for instance in self.instances: - variables = self.inventory.get_host(instance.label).get_vars() - self._add_host_to_composed_groups( - self.get_option('groups'), - variables, - instance.label, - strict=strict) - self._add_host_to_keyed_groups( - self.get_option('keyed_groups'), - variables, - instance.label, - strict=strict) - self._set_composite_vars( - self.get_option('compose'), - variables, - instance.label, - strict=strict) - - def verify_file(self, path): - """Verify the Linode configuration file.""" - if super(InventoryModule, self).verify_file(path): - endings = ('linode.yaml', 'linode.yml') - if any((path.endswith(ending) for ending in endings)): - return True - return False - - def parse(self, inventory, loader, path, cache=True): - """Dynamically parse the Linode cloud inventory.""" - super(InventoryModule, self).parse(inventory, loader, path) - self.instances = None - - if not HAS_LINODE: - raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.') - - self._read_config_data(path) - - cache_key = self.get_cache_key(path) - - if cache: - cache = self.get_option('cache') - - update_cache = False - if cache: - try: - self.instances = [Instance(None, i["id"], i) for i in self._cache[cache_key]] - except KeyError: - update_cache = True - - # Check for None rather than False in order to allow - # for empty sets of cached instances - if self.instances is None: - self._build_client(loader) - self._get_instances_inventory() - - if update_cache: - self._cache[cache_key] = self._cacheable_inventory() - - self.populate() diff --git a/ansible_collections/community/general/plugins/inventory/lxd.py b/ansible_collections/community/general/plugins/inventory/lxd.py deleted file mode 100644 index 91263850..00000000 --- a/ansible_collections/community/general/plugins/inventory/lxd.py +++ /dev/null @@ -1,1051 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Frank Dornheim -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION =
r''' - name: lxd - short_description: Returns Ansible inventory from lxd host - description: - - Get inventory from the lxd daemon. - - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. - version_added: "3.0.0" - author: "Frank Dornheim (@conloos)" - requirements: - - ipaddress - - lxd >= 4.0 - options: - plugin: - description: Token that ensures this is a source file for the 'lxd' plugin. - required: true - choices: [ 'community.general.lxd' ] - url: - description: - - The unix domain socket path or the https URL for the lxd server. - - Socket paths in the filesystem have to start with C(unix:). - - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - aliases: [ key_file ] - default: $HOME/.config/lxc/client.key - type: path - client_cert: - description: - - The client certificate file path. - aliases: [ cert_file ] - default: $HOME/.config/lxc/client.crt - type: path - trust_password: - description: - - The client trusted password. - - You need to set this password on the lxd server before - running this module, using the following command - C(lxc config set core.trust_password ). - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/). - - If I(trust_password) is set, this module sends a request for authentication before sending any requests. - type: str - state: - description: Filter the instances according to their current status. - type: str - default: none - choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ] - type_filter: - description: - - Filter the instances by type C(virtual-machine), C(container) or C(both). - - The first version of the inventory only supported containers. - type: str - default: container - choices: [ 'virtual-machine', 'container', 'both' ] - version_added: 4.2.0 - prefered_instance_network_interface: - description: - - If an instance has multiple network interfaces, this name pattern selects which one is preferred. - - It is combined with the first number that can be found, e.g. 'eth' + 0. - - The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0. - The old name still works as an alias. - type: str - default: eth - aliases: - - prefered_container_network_interface - prefered_instance_network_family: - description: - - If an instance has multiple network interfaces, this selects which address family is preferred. - - Specify C(inet) for IPv4 and C(inet6) for IPv6. - type: str - default: inet - choices: [ 'inet', 'inet6' ] - groupby: - description: - - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid). - - See example for syntax. 
- type: dict -''' - -EXAMPLES = ''' -# simple lxd.yml -plugin: community.general.lxd -url: unix:/var/snap/lxd/common/lxd/unix.socket - -# simple lxd.yml including filter -plugin: community.general.lxd -url: unix:/var/snap/lxd/common/lxd/unix.socket -state: RUNNING - -# simple lxd.yml including virtual machines and containers -plugin: community.general.lxd -url: unix:/var/snap/lxd/common/lxd/unix.socket -type_filter: both - -# grouping lxd.yml -groupby: - locationBerlin: - type: location - attribute: Berlin - netRangeIPv4: - type: network_range - attribute: 10.98.143.0/24 - netRangeIPv6: - type: network_range - attribute: fd42:bd00:7b11:2167:216:3eff::/24 - osUbuntu: - type: os - attribute: ubuntu - testpattern: - type: pattern - attribute: test - profileDefault: - type: profile - attribute: default - profileX11: - type: profile - attribute: x11 - releaseFocal: - type: release - attribute: focal - releaseBionic: - type: release - attribute: bionic - typeVM: - type: type - attribute: virtual-machine - typeContainer: - type: type - attribute: container - vlan666: - type: vlanid - attribute: 666 -''' - -import binascii -import json -import re -import time -import os -import socket -from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.common.dict_transformations import dict_merge -from ansible.module_utils.six import raise_from -from ansible.errors import AnsibleError, AnsibleParserError -from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException - -try: - import ipaddress -except ImportError as exc: - IPADDRESS_IMPORT_ERROR = exc -else: - IPADDRESS_IMPORT_ERROR = None - - -class InventoryModule(BaseInventoryPlugin): - DEBUG = 4 - NAME = 'community.general.lxd' - SNAP_SOCKET_URL = 'unix:/var/snap/lxd/common/lxd/unix.socket' - SOCKET_URL = 'unix:/var/lib/lxd/unix.socket' - - @staticmethod - def load_json_data(path): - """Load json data - - Load json data from file - - Args: - list(path): Path elements - str(file_name): Filename of data - Kwargs: - None - Raises: - None - Returns: - dict(json_data): json data""" - try: - with open(path, 'r') as json_file: - return json.load(json_file) - except (IOError, json.decoder.JSONDecodeError) as err: - raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err))) - - def save_json_data(self, path, file_name=None): - """save data as json - - Save data as json file - - Args: - list(path): Path elements - str(file_name): Filename of data - Kwargs: - None - Raises: - None - Returns: - None""" - - if file_name: - path.append(file_name) - else: - prefix = 'lxd_data-' - time_stamp = time.strftime('%Y%m%d-%H%M%S') - suffix = '.atd' - path.append(prefix + time_stamp + suffix) - - try: - cwd = os.path.abspath(os.path.dirname(__file__)) - with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file: - json.dump(self.data, json_file) - except IOError as err: - raise AnsibleParserError('Could not save data: {0}'.format(to_native(err))) - - def verify_file(self, path): - """Check the config - - Return true/false if the config-file is valid for this plugin - - Args: - str(path): path to the config - Kwargs: - None - Raises: - None - Returns: - bool(valid): is valid""" - valid = False - if super(InventoryModule, self).verify_file(path): - if path.endswith(('lxd.yaml', 'lxd.yml')): - valid = True - else: - self.display.vvv('Inventory source not ending in 
"lxd.yaml" or "lxd.yml"') - return valid - - @staticmethod - def validate_url(url): - """validate url - - check whether the url is correctly formatted - - Args: - url - Kwargs: - None - Raises: - AnsibleError - Returns: - bool""" - if not isinstance(url, str): - return False - if not url.startswith(('unix:', 'https:')): - raise AnsibleError('URL is malformed: {0}'.format(to_native(url))) - return True - - def _connect_to_socket(self): - """connect to lxd socket - - Connect to lxd socket by provided url or defaults - - Args: - None - Kwargs: - None - Raises: - AnsibleError - Returns: - None""" - error_storage = {} - url_list = [self.get_option('url'), self.SNAP_SOCKET_URL, self.SOCKET_URL] - urls = (url for url in url_list if self.validate_url(url)) - for url in urls: - try: - socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug) - return socket_connection - except LXDClientException as err: - error_storage[url] = err - raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage))) - - def _get_networks(self): - """Get Networknames - - Returns all network config names - - Args: - None - Kwargs: - None - Raises: - None - Returns: - list(names): names of all network_configs""" - # e.g. {'type': 'sync', - # 'status': 'Success', - # 'status_code': 200, - # 'operation': '', - # 'error_code': 0, - # 'error': '', - # 'metadata': ['/1.0/networks/lxdbr0']} - network_configs = self.socket.do('GET', '/1.0/networks') - return [m.split('/')[3] for m in network_configs['metadata']] - - def _get_instances(self): - """Get instancenames - - Returns all instancenames - - Args: - None - Kwargs: - None - Raises: - None - Returns: - list(names): names of all instances""" - # e.g. { - # "metadata": [ - # "/1.0/instances/foo", - # "/1.0/instances/bar" - # ], - # "status": "Success", - # "status_code": 200, - # "type": "sync" - # } - instances = self.socket.do('GET', '/1.0/instances') - return [m.split('/')[3] for m in instances['metadata']] - - def _get_config(self, branch, name): - """Get inventory of instance - - Get config of instance - - Args: - str(branch): Name oft the API-Branch - str(name): Name of instance - Kwargs: - None - Source: - https://github.com/lxc/lxd/blob/master/doc/rest-api.md - Raises: - None - Returns: - dict(config): Config of the instance""" - config = {} - if isinstance(branch, (tuple, list)): - config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))} - else: - config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))} - return config - - def get_instance_data(self, names): - """Create Inventory of the instance - - Iterate through the different branches of the instances and collect Informations. - - Args: - list(names): List of instance names - Kwargs: - None - Raises: - None - Returns: - None""" - # tuple(('instances','metadata/templates')) to get section in branch - # e.g. /1.0/instances//metadata/templates - branches = ['instances', ('instances', 'state')] - instance_config = {} - for branch in branches: - for name in names: - instance_config['instances'] = self._get_config(branch, name) - self.data = dict_merge(instance_config, self.data) - - def get_network_data(self, names): - """Create Inventory of the instance - - Iterate through the different branches of the instances and collect Informations. 
- - Args: - list(names): List of network names - Kwargs: - None - Raises: - None - Returns: - None""" - # tuple(('instances','metadata/templates')) to get section in branch - # e.g. /1.0/instances//metadata/templates - branches = [('networks', 'state')] - network_config = {} - for branch in branches: - for name in names: - try: - network_config['networks'] = self._get_config(branch, name) - except LXDClientException: - network_config['networks'] = {name: None} - self.data = dict_merge(network_config, self.data) - - def extract_network_information_from_instance_config(self, instance_name): - """Returns the network interface configuration - - Returns the network ipv4 and ipv6 config of the instance without link-local addresses - - Args: - str(instance_name): Name of the instance - Kwargs: - None - Raises: - None - Returns: - dict(network_configuration): network config""" - instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name)) - network_configuration = None - if instance_network_interfaces: - network_configuration = {} - gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo'] - for interface_name in gen_interface_names: - gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link'] - network_configuration[interface_name] = [] - for address in gen_address: - address_set = {} - address_set['family'] = address.get('family') - address_set['address'] = address.get('address') - address_set['netmask'] = address.get('netmask') - address_set['combined'] = address.get('address') + '/' + address.get('netmask') - network_configuration[interface_name].append(address_set) - return network_configuration - - def get_prefered_instance_network_interface(self, instance_name): - """Helper to get the preferred interface of the instance - - Helper to get the preferred interface provided by name pattern from 'prefered_instance_network_interface'. - - Args: - str(instance_name): name of instance - Kwargs: - None - Raises: - None - Returns: - str(prefered_interface): None or interface name""" - instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)) - prefered_interface = None # init - if instance_network_interfaces: # instance has network interfaces - # generator of interfaces which start with the desired pattern - net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)] - selected_interfaces = [] # init - for interface in net_generator: - selected_interfaces.append(interface) - if len(selected_interfaces) > 0: - prefered_interface = sorted(selected_interfaces)[0] - return prefered_interface - - def get_instance_vlans(self, instance_name): - """Get VLAN(s) from instance - - Helper to get the VLAN_ID from the instance - - Args: - str(instance_name): name of instance - Kwargs: - None - Raises: - None - Returns: - None""" - # get network device configuration and store {network: vlan_id} - network_vlans = {} - for network in self._get_data_entry('networks'): - if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)): - network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)) - - # get network devices of instance and return - # e.g. 
- # "eth0":{ "name":"eth0", - # "network":"lxdbr0", - # "type":"nic"}, - vlan_ids = {} - devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name))) - for device in devices: - if 'network' in devices[device]: - if devices[device]['network'] in network_vlans: - vlan_ids[devices[device].get('network')] = network_vlans[devices[device].get('network')] - return vlan_ids if vlan_ids else None - - def _get_data_entry(self, path, data=None, delimiter='/'): - """Helper to get data - - Helper to get data from self.data by a path like 'path/to/target' - Attention: Escaping of the delimiter is not (yet) provided. - - Args: - str(path): path to nested dict - Kwargs: - dict(data): datastore - str(delimiter): delimiter in Path. - Raises: - None - Returns: - *(value)""" - try: - if not data: - data = self.data - if delimiter in path: - path = path.split(delimiter) - - if isinstance(path, list) and len(path) > 1: - data = data[path.pop(0)] - path = delimiter.join(path) - return self._get_data_entry(path, data, delimiter) # recursion - return data[path] - except KeyError: - return None - - def _set_data_entry(self, instance_name, key, value, path=None): - """Helper to save data - - Helper to save the data in self.data - Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten. - - Args: - str(instance_name): name of instance - str(key): same as dict - *(value): same as dict - Kwargs: - str(path): path to branch-part - Raises: - AnsibleParserError - Returns: - None""" - if not path: - path = self.data['inventory'] - if instance_name not in path: - path[instance_name] = {} - - try: - if isinstance(value, dict) and key in path[instance_name]: - path[instance_name] = dict_merge(value, path[instance_name][key]) - else: - path[instance_name][key] = value - except KeyError as err: - raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err))) - - def extract_information_from_instance_configs(self): - """Process configuration information - - Preparation of the data - - Args: - dict(configs): instance configurations - Kwargs: - None - Raises: - None - Returns: - None""" - # create branch "inventory" - if 'inventory' not in self.data: - self.data['inventory'] = {} - - for instance_name in self.data['instances']: - self._set_data_entry(instance_name, 'os', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.os'.format(instance_name))) - self._set_data_entry(instance_name, 'release', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.release'.format(instance_name))) - self._set_data_entry(instance_name, 'version', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.version'.format(instance_name))) - self._set_data_entry(instance_name, 'profile', self._get_data_entry( - 'instances/{0}/instances/metadata/profiles'.format(instance_name))) - self._set_data_entry(instance_name, 'location', self._get_data_entry( - 'instances/{0}/instances/metadata/location'.format(instance_name))) - self._set_data_entry(instance_name, 'state', self._get_data_entry( - 'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name))) - self._set_data_entry(instance_name, 'type', self._get_data_entry( - 'instances/{0}/instances/metadata/type'.format(instance_name))) - self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name)) - self._set_data_entry(instance_name, 
'preferred_interface', self.get_prefered_instance_network_interface(instance_name)) - self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name)) - - def build_inventory_network(self, instance_name): - """Add the network interfaces of the instance to the inventory - - Logic: - - if the instance has no interface -> 'ansible_connection: local' - - get preferred_interface & prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: ' - - first interface from: network_interfaces prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: ' - - Args: - str(instance_name): name of instance - Kwargs: - None - Raises: - None - Returns: - None""" - - def interface_selection(instance_name): - """Select instance interface for inventory - - Logic: - - get preferred_interface & prefered_instance_network_family -> str(IP) - - first interface from: network_interfaces prefered_instance_network_family -> str(IP) - - Args: - str(instance_name): name of instance - Kwargs: - None - Raises: - None - Returns: - str(ip_address): IP address or empty string""" - prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None - prefered_instance_network_family = self.prefered_instance_network_family - - ip_address = '' - if prefered_interface: - interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface)) - for config in interface: - if config['family'] == prefered_instance_network_family: - ip_address = config['address'] - break - else: - interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)) - for interface in interfaces.values(): - for config in interface: - if config['family'] == prefered_instance_network_family: - ip_address = config['address'] - break - return ip_address - - if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance has network interfaces - self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh') - self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name)) - else: - self.inventory.set_variable(instance_name, 'ansible_connection', 'local') - - def build_inventory_hosts(self): - """Build host-part dynamic inventory - - Build the host-part of the dynamic inventory. - Add hosts and host_vars to the inventory. 
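The _get_data_entry() helper above resolves 'path/to/key' strings against the nested self.data store; a minimal standalone approximation of that traversal (hypothetical names, and, as the plugin itself notes, no delimiter escaping):

def get_entry(data, path, delimiter='/'):
    # Walk the nested dict one key at a time; None signals a missing path.
    for key in path.split(delimiter):
        try:
            data = data[key]
        except (KeyError, TypeError):
            return None
    return data

tree = {'instances': {'web01': {'state': 'RUNNING'}}}
assert get_entry(tree, 'instances/web01/state') == 'RUNNING'
assert get_entry(tree, 'instances/web01/missing') is None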
- - Args: - None - Kwargs: - None - Raises: - None - Returns: - None""" - for instance_name in self.data['inventory']: - instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower() - - # Only consider instances that match the "state" filter, if self.state is not None - if self.filter: - if self.filter.lower() != instance_state: - continue - # add instance - self.inventory.add_host(instance_name) - # add network informations - self.build_inventory_network(instance_name) - # add os - v = self._get_data_entry('inventory/{0}/os'.format(instance_name)) - if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_os', v.lower()) - # add release - v = self._get_data_entry('inventory/{0}/release'.format(instance_name)) - if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_release', v.lower()) - # add profile - self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name))) - # add state - self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state) - # add type - self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name))) - # add location information - if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None' - self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name))) - # add VLAN_ID information - if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)): - self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name))) - - def build_inventory_groups_location(self, group_name): - """create group by attribute: location - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - # maybe we just want to expand one group - if group_name not in self.inventory.groups: - self.inventory.add_group(group_name) - - for instance_name in self.inventory.hosts: - if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars(): - self.inventory.add_child(group_name, instance_name) - - def build_inventory_groups_pattern(self, group_name): - """create group by name pattern - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - # maybe we just want to expand one group - if group_name not in self.inventory.groups: - self.inventory.add_group(group_name) - - regex_pattern = self.groupby[group_name].get('attribute') - - for instance_name in self.inventory.hosts: - result = re.search(regex_pattern, instance_name) - if result: - self.inventory.add_child(group_name, instance_name) - - def build_inventory_groups_network_range(self, group_name): - """check if IP is in network-class - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - # maybe we just want to expand one group - if group_name not in self.inventory.groups: - self.inventory.add_group(group_name) - - try: - network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute'))) - except ValueError as err: - raise AnsibleParserError( - 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err))) - - for instance_name in self.inventory.hosts: - if self.data['inventory'][instance_name].get('network_interfaces') is not None: - 
for interface in self.data['inventory'][instance_name].get('network_interfaces'): - for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]: - try: - address = ipaddress.ip_address(to_text(interface_family['address'])) - if address.version == network.version and address in network: - self.inventory.add_child(group_name, instance_name) - except ValueError: - # Ignore invalid IP addresses returned by lxd - pass - - def build_inventory_groups_os(self, group_name): - """create group by attribute: os - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - # maybe we just want to expand one group - if group_name not in self.inventory.groups: - self.inventory.add_group(group_name) - - gen_instances = [ - instance_name for instance_name in self.inventory.hosts - if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()] - for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'): - self.inventory.add_child(group_name, instance_name) - - def build_inventory_groups_release(self, group_name): - """create group by attribute: release - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - # maybe we just want to expand one group - if group_name not in self.inventory.groups: - self.inventory.add_group(group_name) - - gen_instances = [ - instance_name for instance_name in self.inventory.hosts - if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()] - for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'): - self.inventory.add_child(group_name, instance_name) - - def build_inventory_groups_profile(self, group_name): - """create group by attribute: profile - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - # maybe we just want to expand one group - if group_name not in self.inventory.groups: - self.inventory.add_group(group_name) - - gen_instances = [ - instance_name for instance_name in self.inventory.hosts.keys() - if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()] - for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'): - self.inventory.add_child(group_name, instance_name) - - def build_inventory_groups_vlanid(self, group_name): - """create group by attribute: vlanid - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - # maybe we just want to expand one group - if group_name not in self.inventory.groups: - self.inventory.add_group(group_name) - - gen_instances = [ - instance_name for instance_name in self.inventory.hosts.keys() - if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()] - for instance_name in gen_instances: - if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values(): - self.inventory.add_child(group_name, instance_name) - - def build_inventory_groups_type(self, group_name): - """create group by attribute: type - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - # maybe we just want to expand one group - if group_name not 
in self.inventory.groups: - self.inventory.add_group(group_name) - - gen_instances = [ - instance_name for instance_name in self.inventory.hosts - if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()] - for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'): - self.inventory.add_child(group_name, instance_name) - - def build_inventory_groups(self): - """Build group-part dynamic inventory - - Build the group-part of the dynamic inventory. - Add groups to the inventory. - - Args: - None - Kwargs: - None - Raises: - None - Returns: - None""" - - def group_type(group_name): - """create groups defined by lxd.yml or default values - - create groups defined by lxd.yml or default values - supported: - * 'location' - * 'pattern' - * 'network_range' - * 'os' - * 'release' - * 'profile' - * 'vlanid' - * 'type' - - Args: - str(group_name): Group name - Kwargs: - None - Raises: - None - Returns: - None""" - - # Due to compatibility with Python 2, map() is not used here - if self.groupby[group_name].get('type') == 'location': - self.build_inventory_groups_location(group_name) - elif self.groupby[group_name].get('type') == 'pattern': - self.build_inventory_groups_pattern(group_name) - elif self.groupby[group_name].get('type') == 'network_range': - self.build_inventory_groups_network_range(group_name) - elif self.groupby[group_name].get('type') == 'os': - self.build_inventory_groups_os(group_name) - elif self.groupby[group_name].get('type') == 'release': - self.build_inventory_groups_release(group_name) - elif self.groupby[group_name].get('type') == 'profile': - self.build_inventory_groups_profile(group_name) - elif self.groupby[group_name].get('type') == 'vlanid': - self.build_inventory_groups_vlanid(group_name) - elif self.groupby[group_name].get('type') == 'type': - self.build_inventory_groups_type(group_name) - else: - raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name))) - - if self.groupby: - for group_name in self.groupby: - if not group_name.isalnum(): - raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name))) - group_type(group_name) - - def build_inventory(self): - """Build dynamic inventory - - Build the dynamic inventory. - - Args: - None - Kwargs: - None - Raises: - None - Returns: - None""" - - self.build_inventory_hosts() - self.build_inventory_groups() - - def cleandata(self): - """Clean the dynamic inventory - - The first version of the inventory only supported containers. - This will change in the future. - The following function cleans up the data and removes all items of the wrong type. - - Args: - None - Kwargs: - None - Raises: - None - Returns: - None""" - iter_keys = list(self.data['instances'].keys()) - for instance_name in iter_keys: - if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter: - del self.data['instances'][instance_name] - - def _populate(self): - """Return the hosts and groups - - Returns the processed instance configurations from the lxd import - - Args: - None - Kwargs: - None - Raises: - None - Returns: - None""" - - if len(self.data) == 0: # If no data was injected by unit tests, open the socket - self.socket = self._connect_to_socket() - self.get_instance_data(self._get_instances()) - self.get_network_data(self._get_networks()) - - # The first version of the inventory only supported containers. 
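The if/elif chain in group_type() above is effectively a lookup table keyed on the configured group type; once Python 2 compatibility is no longer a concern, a dict dispatch could express the same thing more compactly. A standalone sketch of that alternative (hypothetical refactor, not the shipped code):

def dispatch_group(groupby, group_name, builders):
    # builders maps a type keyword ('os', 'profile', ...) to the
    # corresponding build_inventory_groups_* callable.
    builder = builders.get(groupby[group_name].get('type'))
    if builder is None:
        raise ValueError('Unknown group type: {0}'.format(group_name))
    builder(group_name)

built = []
dispatch_group({'osUbuntu': {'type': 'os'}}, 'osUbuntu', {'os': built.append})
assert built == ['osUbuntu']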
- # This will change in the future. - # The following function cleans up the data. - if self.type_filter != 'both': - self.cleandata() - - self.extract_information_from_instance_configs() - - # self.display.vvv(self.save_json_data([os.path.abspath(__file__)])) - - self.build_inventory() - - def parse(self, inventory, loader, path, cache): - """Return dynamic inventory from source - - Returns the processed inventory from the lxd import - - Args: - str(inventory): inventory object with existing data and - the methods to add hosts/groups/variables - to inventory - str(loader): Ansible's DataLoader - str(path): path to the config - bool(cache): use or avoid caches - Kwargs: - None - Raises: - AnsibleParserError - Returns: - None""" - if IPADDRESS_IMPORT_ERROR: - raise_from( - AnsibleError('ipaddress must be installed to use this plugin'), - IPADDRESS_IMPORT_ERROR) - - super(InventoryModule, self).parse(inventory, loader, path, cache=False) - # Read the inventory YAML file - self._read_config_data(path) - try: - self.client_key = self.get_option('client_key') - self.client_cert = self.get_option('client_cert') - self.debug = self.DEBUG - self.data = {} # store for inventory-data - self.groupby = self.get_option('groupby') - self.plugin = self.get_option('plugin') - self.prefered_instance_network_family = self.get_option('prefered_instance_network_family') - self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface') - self.type_filter = self.get_option('type_filter') - if self.get_option('state').lower() == 'none': # none in config is str() - self.filter = None - else: - self.filter = self.get_option('state').lower() - self.trust_password = self.get_option('trust_password') - self.url = self.get_option('url') - except Exception as err: - raise AnsibleParserError( - 'All correct options required: {0}'.format(to_native(err))) - # Call our internal helper to populate the dynamic inventory - self._populate() diff --git a/ansible_collections/community/general/plugins/inventory/nmap.py b/ansible_collections/community/general/plugins/inventory/nmap.py deleted file mode 100644 index 44d68750..00000000 --- a/ansible_collections/community/general/plugins/inventory/nmap.py +++ /dev/null @@ -1,215 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: nmap - short_description: Uses nmap to find hosts to target - description: - - Uses a YAML configuration file with a valid YAML extension. - extends_documentation_fragment: - - constructed - - inventory_cache - requirements: - - nmap CLI installed - options: - plugin: - description: Token that ensures this is a source file for the 'nmap' plugin. - required: True - choices: ['nmap', 'community.general.nmap'] - address: - description: Network IP or range of IPs to scan; you can use a simple range (10.2.2.15-25) or CIDR notation. 
- required: True - exclude: - description: list of addresses to exclude - type: list - elements: string - ports: - description: Enable/disable scanning for open ports - type: boolean - default: True - ipv4: - description: use IPv4 type addresses - type: boolean - default: True - ipv6: - description: use IPv6 type addresses - type: boolean - default: True - notes: - - At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False. - - 'TODO: add OS fingerprinting' -''' -EXAMPLES = ''' -# inventory.config file in YAML format -plugin: community.general.nmap -strict: False -address: 192.168.0.0/24 -''' - -import os -import re - -from subprocess import Popen, PIPE - -from ansible import constants as C -from ansible.errors import AnsibleParserError -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.module_utils.common.process import get_bin_path - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - - NAME = 'community.general.nmap' - find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?') - find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)') - - def __init__(self): - self._nmap = None - super(InventoryModule, self).__init__() - - def _populate(self, hosts): - # Use constructed if applicable - strict = self.get_option('strict') - - for host in hosts: - hostname = host['name'] - self.inventory.add_host(hostname) - for var, value in host.items(): - self.inventory.set_variable(hostname, var, value) - - # Composed variables - self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict) - - # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict) - - # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict) - - def verify_file(self, path): - - valid = False - if super(InventoryModule, self).verify_file(path): - file_name, ext = os.path.splitext(path) - - if not ext or ext in C.YAML_FILENAME_EXTENSIONS: - valid = True - - return valid - - def parse(self, inventory, loader, path, cache=True): - - try: - self._nmap = get_bin_path('nmap') - except ValueError as e: - raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e))) - - super(InventoryModule, self).parse(inventory, loader, path, cache=cache) - - self._read_config_data(path) - - cache_key = self.get_cache_key(path) - - # cache may be True or False at this point to indicate if the inventory is being refreshed - # get the user's cache option too to see if we should save the cache if it is changing - user_cache_setting = self.get_option('cache') - - # read if the user has caching enabled and the cache isn't being refreshed - attempt_to_read_cache = user_cache_setting and cache - # update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below - cache_needs_update = user_cache_setting and not cache - - if attempt_to_read_cache: - try: - results = self._cache[cache_key] - except KeyError: - # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated - cache_needs_update = True - - if not 
user_cache_setting or cache_needs_update: - # setup command - cmd = [self._nmap] - if not self._options['ports']: - cmd.append('-sP') - - if self._options['ipv4'] and not self._options['ipv6']: - cmd.append('-4') - elif self._options['ipv6'] and not self._options['ipv4']: - cmd.append('-6') - elif not self._options['ipv6'] and not self._options['ipv4']: - raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin') - - if self._options['exclude']: - cmd.append('--exclude') - cmd.append(','.join(self._options['exclude'])) - - cmd.append(self._options['address']) - try: - # execute - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - if p.returncode != 0: - raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr))) - - # parse results - host = None - ip = None - ports = [] - results = [] - - try: - t_stdout = to_text(stdout, errors='surrogate_or_strict') - except UnicodeError as e: - raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e)) - - for line in t_stdout.splitlines(): - hits = self.find_host.match(line) - if hits: - if host is not None and ports: - results[-1]['ports'] = ports - - # if dns only shows arpa, just use ip instead as hostname - if hits.group(1).endswith('.in-addr.arpa'): - host = hits.group(2) - else: - host = hits.group(1) - - # if no reverse dns exists, just use ip instead as hostname - if hits.group(2) is not None: - ip = hits.group(2) - else: - ip = hits.group(1) - - if host is not None: - # update inventory - results.append(dict()) - results[-1]['name'] = host - results[-1]['ip'] = ip - ports = [] - continue - - host_ports = self.find_port.match(line) - if host is not None and host_ports: - ports.append({'port': host_ports.group(1), - 'protocol': host_ports.group(2), - 'state': host_ports.group(3), - 'service': host_ports.group(4)}) - continue - - # if any leftovers - if host and ports: - results[-1]['ports'] = ports - - except Exception as e: - raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) - - if cache_needs_update: - self._cache[cache_key] = results - - self._populate(results) diff --git a/ansible_collections/community/general/plugins/inventory/online.py b/ansible_collections/community/general/plugins/inventory/online.py deleted file mode 100644 index 00454f55..00000000 --- a/ansible_collections/community/general/plugins/inventory/online.py +++ /dev/null @@ -1,262 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' - name: online - author: - - Remy Leone (@remyleone) - short_description: Scaleway (previously Online SAS or Online.net) inventory source - description: - - Get inventory hosts from Scaleway (previously Online SAS or Online.net). - options: - plugin: - description: token that ensures this is a source file for the 'online' plugin. - required: True - choices: ['online', 'community.general.online'] - oauth_token: - required: True - description: Online OAuth token. - env: - # in order of precedence - - name: ONLINE_TOKEN - - name: ONLINE_API_KEY - - name: ONLINE_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. 
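For a concrete sense of what the two regular expressions in the nmap plugin above capture, a quick standalone check (the patterns are copied from the plugin; the sample lines are illustrative, not output from a real scan):

import re

find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?')
find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)')

m = find_host.match('Nmap scan report for web01.example.com (192.168.0.10)')
assert m.group(1) == 'web01.example.com' and m.group(2) == '192.168.0.10'
m = find_port.match('22/tcp   open   ssh')
assert m.group(1) == '22' and m.group(4) == 'ssh'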
- type: list - elements: string - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - hostname - groups: - description: List of groups. - type: list - elements: string - choices: - - location - - offer - - rpn -''' - -EXAMPLES = r''' -# online_inventory.yml file in YAML format -# Example command line: ansible-inventory --list -i online_inventory.yml - -plugin: community.general.online -hostnames: - - public_ipv4 -groups: - - location - - offer - - rpn -''' - -import json -from sys import version as python_version - -from ansible.errors import AnsibleError -from ansible.module_utils.urls import open_url -from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.ansible_release import __version__ as ansible_version -from ansible.module_utils.six.moves.urllib.parse import urljoin - - -class InventoryModule(BaseInventoryPlugin): - NAME = 'community.general.online' - API_ENDPOINT = "https://api.online.net" - - def extract_public_ipv4(self, host_infos): - try: - return host_infos["network"]["ip"][0] - except (KeyError, TypeError, IndexError): - self.display.warning("An error happened while extracting public IPv4 address. Information skipped.") - return None - - def extract_private_ipv4(self, host_infos): - try: - return host_infos["network"]["private"][0] - except (KeyError, TypeError, IndexError): - self.display.warning("An error happened while extracting private IPv4 address. Information skipped.") - return None - - def extract_os_name(self, host_infos): - try: - return host_infos["os"]["name"] - except (KeyError, TypeError): - self.display.warning("An error happened while extracting OS name. Information skipped.") - return None - - def extract_os_version(self, host_infos): - try: - return host_infos["os"]["version"] - except (KeyError, TypeError): - self.display.warning("An error happened while extracting OS version. Information skipped.") - return None - - def extract_hostname(self, host_infos): - try: - return host_infos["hostname"] - except (KeyError, TypeError): - self.display.warning("An error happened while extracting hostname. Information skipped.") - return None - - def extract_location(self, host_infos): - try: - return host_infos["location"]["datacenter"] - except (KeyError, TypeError): - self.display.warning("An error happened while extracting datacenter location. Information skipped.") - return None - - def extract_offer(self, host_infos): - try: - return host_infos["offer"] - except (KeyError, TypeError): - self.display.warning("An error happened while extracting commercial offer. Information skipped.") - return None - - def extract_rpn(self, host_infos): - try: - return self.rpn_lookup_cache[host_infos["id"]] - except (KeyError, TypeError): - self.display.warning("An error happened while extracting RPN information. 
Information skipped.") - return None - - def _fetch_information(self, url): - try: - response = open_url(url, headers=self.headers) - except Exception as e: - self.display.warning("An error happened while fetching: %s" % url) - return None - - try: - raw_data = to_text(response.read(), errors='surrogate_or_strict') - except UnicodeError: - raise AnsibleError("Incorrect encoding of fetched payload from Online servers") - - try: - return json.loads(raw_data) - except ValueError: - raise AnsibleError("Incorrect JSON payload") - - @staticmethod - def extract_rpn_lookup_cache(rpn_list): - lookup = {} - for rpn in rpn_list: - for member in rpn["members"]: - lookup[member["id"]] = rpn["name"] - return lookup - - def _fill_host_variables(self, hostname, host_infos): - targeted_attributes = ( - "offer", - "id", - "hostname", - "location", - "boot_mode", - "power", - "last_reboot", - "anti_ddos", - "hardware_watch", - "support" - ) - for attribute in targeted_attributes: - self.inventory.set_variable(hostname, attribute, host_infos[attribute]) - - if self.extract_public_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos)) - self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos)) - - if self.extract_private_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos)) - - if self.extract_os_name(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos)) - - if self.extract_os_version(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos)) - - def _filter_host(self, host_infos, hostname_preferences): - - for pref in hostname_preferences: - if self.extractors[pref](host_infos): - return self.extractors[pref](host_infos) - - return None - - def do_server_inventory(self, host_infos, hostname_preferences, group_preferences): - - hostname = self._filter_host(host_infos=host_infos, - hostname_preferences=hostname_preferences) - - # No suitable hostname were found in the attributes and the host won't be in the inventory - if not hostname: - return - - self.inventory.add_host(host=hostname) - self._fill_host_variables(hostname=hostname, host_infos=host_infos) - - for g in group_preferences: - group = self.group_extractors[g](host_infos) - - if not group: - return - - self.inventory.add_group(group=group) - self.inventory.add_host(group=group, host=hostname) - - def parse(self, inventory, loader, path, cache=True): - super(InventoryModule, self).parse(inventory, loader, path) - self._read_config_data(path=path) - - token = self.get_option("oauth_token") - hostname_preferences = self.get_option("hostnames") - - group_preferences = self.get_option("groups") - if group_preferences is None: - group_preferences = [] - - self.extractors = { - "public_ipv4": self.extract_public_ipv4, - "private_ipv4": self.extract_private_ipv4, - "hostname": self.extract_hostname, - } - - self.group_extractors = { - "location": self.extract_location, - "offer": self.extract_offer, - "rpn": self.extract_rpn - } - - self.headers = { - 'Authorization': "Bearer %s" % token, - 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]), - 'Content-type': 'application/json' - } - - servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server") - servers_api_path = 
self._fetch_information(url=servers_url) - - if "rpn" in group_preferences: - rpn_groups_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/rpn/group") - rpn_list = self._fetch_information(url=rpn_groups_url) - self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list) - - for server_api_path in servers_api_path: - - server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path) - raw_server_info = self._fetch_information(url=server_url) - - if raw_server_info is None: - continue - - self.do_server_inventory(host_infos=raw_server_info, - hostname_preferences=hostname_preferences, - group_preferences=group_preferences) diff --git a/ansible_collections/community/general/plugins/inventory/opennebula.py b/ansible_collections/community/general/plugins/inventory/opennebula.py deleted file mode 100644 index d967e13f..00000000 --- a/ansible_collections/community/general/plugins/inventory/opennebula.py +++ /dev/null @@ -1,239 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = r''' - name: opennebula - author: - - Kristian Feldsam (@feldsam) - short_description: OpenNebula inventory source - version_added: "3.8.0" - extends_documentation_fragment: - - constructed - description: - - Get inventory hosts from OpenNebula cloud. - - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml) - to set parameter values. - - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file. - options: - plugin: - description: Token that ensures this is a source file for the 'opennebula' plugin. - type: string - required: true - choices: [ community.general.opennebula ] - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - transferred over the network unencrypted. - - If not set then the value of the C(ONE_URL) environment variable is used. - env: - - name: ONE_URL - required: True - type: string - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - then the value of the C(ONE_USERNAME) environment variable is used. - env: - - name: ONE_USERNAME - type: string - api_password: - description: - - Password or a token of the user to login into OpenNebula RPC server. - - If not set, the value of the C(ONE_PASSWORD) environment variable is used. - env: - - name: ONE_PASSWORD - required: False - type: string - api_authfile: - description: - - If both I(api_username) or I(api_password) are not set, then it will try - authenticate with ONE auth file. Default path is C(~/.one/one_auth). - - Set environment variable C(ONE_AUTH) to override this path. - env: - - name: ONE_AUTH - required: False - type: string - hostname: - description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM. - type: string - default: v4_first_ip - choices: - - v4_first_ip - - v6_first_ip - - name - filter_by_label: - description: Only return servers filtered by this label. 
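One subtlety in the online plugin above: request URLs are assembled with urljoin(), whose result depends on whether the second argument is a relative or an absolute path. A quick standalone check (using the stdlib import; the plugin itself goes through module_utils.six):

from urllib.parse import urljoin

API_ENDPOINT = 'https://api.online.net'
# A relative path is appended after the host.
assert urljoin(API_ENDPOINT, 'api/v1/server') == 'https://api.online.net/api/v1/server'
# An absolute path replaces whatever path the base URL already has.
assert urljoin(API_ENDPOINT, '/api/v1/server/123') == 'https://api.online.net/api/v1/server/123'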
- type: string - group_by_labels: - description: Create host groups by vm labels - type: bool - default: True -''' - -EXAMPLES = r''' -# inventory_opennebula.yml file in YAML format -# Example command line: ansible-inventory --list -i inventory_opennebula.yml - -# Pass a label filter to the API -plugin: community.general.opennebula -api_url: https://opennebula:2633/RPC2 -filter_by_label: Cache -''' - -try: - import pyone - - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.errors import AnsibleError -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable -from ansible.module_utils.common.text.converters import to_native - -from collections import namedtuple -import os - - -class InventoryModule(BaseInventoryPlugin, Constructable): - NAME = 'community.general.opennebula' - - def verify_file(self, path): - valid = False - if super(InventoryModule, self).verify_file(path): - if path.endswith(('opennebula.yaml', 'opennebula.yml')): - valid = True - return valid - - def _get_connection_info(self): - url = self.get_option('api_url') - username = self.get_option('api_username') - password = self.get_option('api_password') - authfile = self.get_option('api_authfile') - - if not username and not password: - if authfile is None: - authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth") - try: - with open(authfile, "r") as fp: - authstring = fp.read().rstrip() - username, password = authstring.split(":") - except (OSError, IOError): - raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile)) - except Exception: - raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile)) - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) - - def _get_vm_ipv4(self, vm): - nic = vm.TEMPLATE.get('NIC') - - if isinstance(nic, dict): - nic = [nic] - - for net in nic: - return net['IP'] - - return False - - def _get_vm_ipv6(self, vm): - nic = vm.TEMPLATE.get('NIC') - - if isinstance(nic, dict): - nic = [nic] - - for net in nic: - if net.get('IP6_GLOBAL'): - return net['IP6_GLOBAL'] - - return False - - def _get_vm_pool(self): - auth = self._get_connection_info() - - if not (auth.username and auth.password): - raise AnsibleError('API Credentials missing. 
Check OpenNebula inventory file.') - else: - one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - # get hosts (VMs) - try: - vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3) - except Exception as e: - raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e))) - - return vm_pool - - def _retrieve_servers(self, label_filter=None): - vm_pool = self._get_vm_pool() - - result = [] - - # iterate over hosts - for vm in vm_pool.VM: - server = vm.USER_TEMPLATE - - labels = [] - if vm.USER_TEMPLATE.get('LABELS'): - labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()] - labels = ''.join(labels) - labels = labels.replace(' ', '_') - labels = labels.replace('-', '_') - labels = labels.split(',') - - # filter by label - if label_filter is not None: - if label_filter not in labels: - continue - - server['name'] = vm.NAME - server['LABELS'] = labels - server['v4_first_ip'] = self._get_vm_ipv4(vm) - server['v6_first_ip'] = self._get_vm_ipv6(vm) - - result.append(server) - - return result - - def _populate(self): - hostname_preference = self.get_option('hostname') - group_by_labels = self.get_option('group_by_labels') - - # Add a top group 'one' - self.inventory.add_group(group='all') - - filter_by_label = self.get_option('filter_by_label') - for server in self._retrieve_servers(filter_by_label): - # check for labels - if group_by_labels and server['LABELS']: - for label in server['LABELS']: - self.inventory.add_group(group=label) - self.inventory.add_host(host=server['name'], group=label) - - self.inventory.add_host(host=server['name'], group='all') - - for attribute, value in server.items(): - self.inventory.set_variable(server['name'], attribute, value) - - if hostname_preference != 'name': - self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference]) - - if server.get('SSH_PORT'): - self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT']) - - def parse(self, inventory, loader, path, cache=True): - if not HAS_PYONE: - raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!') - - super(InventoryModule, self).parse(inventory, loader, path) - self._read_config_data(path=path) - - self._populate() diff --git a/ansible_collections/community/general/plugins/inventory/proxmox.py b/ansible_collections/community/general/plugins/inventory/proxmox.py deleted file mode 100644 index fc562974..00000000 --- a/ansible_collections/community/general/plugins/inventory/proxmox.py +++ /dev/null @@ -1,513 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2016 Guido Günther , Daniel Lobato Garcia -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: proxmox - short_description: Proxmox inventory source - version_added: "1.2.0" - author: - - Jeffrey van Pelt (@Thulium-Drake) - requirements: - - requests >= 1.1 - description: - - Get inventory hosts from a Proxmox PVE cluster. - - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)" - - Will retrieve the first network interface with an IP for Proxmox nodes. - - Can retrieve LXC/QEMU configuration as facts. 
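The LABELS handling in _retrieve_servers() above boils down to a small normalization step that turns a free-form label string into group-safe names; a standalone sketch of the same logic (illustrative only, not the plugin's code):

def normalize_labels(raw):
    # Keep only alphanumerics, whitespace, commas and hyphens, then make
    # the result group-safe: spaces and hyphens become underscores.
    kept = ''.join(c for c in raw if c == ',' or c == '-' or c.isalnum() or c.isspace())
    return kept.replace(' ', '_').replace('-', '_').split(',')

assert normalize_labels('Web Server,DB-Primary') == ['Web_Server', 'DB_Primary']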
- extends_documentation_fragment: - constructed - inventory_cache - options: - plugin: - description: The name of this plugin; it should always be set to C(community.general.proxmox) for this plugin to recognize it as its own. - required: yes - choices: ['community.general.proxmox'] - type: str - url: - description: - - URL to Proxmox cluster. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead. - default: 'http://localhost:8006' - type: str - env: - - name: PROXMOX_URL - version_added: 2.0.0 - user: - description: - - Proxmox authentication user. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead. - required: yes - type: str - env: - - name: PROXMOX_USER - version_added: 2.0.0 - password: - description: - - Proxmox authentication password. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead. - required: yes - type: str - env: - - name: PROXMOX_PASSWORD - version_added: 2.0.0 - validate_certs: - description: Verify SSL certificate if using HTTPS. - type: boolean - default: yes - group_prefix: - description: Prefix to apply to Proxmox groups. - default: proxmox_ - type: str - facts_prefix: - description: Prefix to apply to LXC/QEMU config facts. - default: proxmox_ - type: str - want_facts: - description: Gather LXC/QEMU configuration facts. - default: no - type: bool - want_proxmox_nodes_ansible_host: - version_added: 3.0.0 - description: - - Whether to set C(ansible_host) for proxmox nodes. - - When set to C(true) (default), will use the first available interface. This can be different from what you expect. - default: true - type: bool - filters: - version_added: 4.6.0 - description: A list of Jinja templates that allow filtering hosts. - type: list - elements: str - default: [] - strict: - version_added: 2.5.0 - compose: - version_added: 2.5.0 - groups: - version_added: 2.5.0 - keyed_groups: - version_added: 2.5.0 -''' - -EXAMPLES = ''' -# Minimal example which will not gather additional facts for QEMU/LXC guests -# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006 -# my.proxmox.yml -plugin: community.general.proxmox -user: ansible@pve -password: secure - -# More complete example demonstrating the use of 'want_facts' and the constructed options -# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true' -# my.proxmox.yml -plugin: community.general.proxmox -url: http://pve.domain.com:8006 -user: ansible@pve -password: secure -validate_certs: false -want_facts: true -keyed_groups: - # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true' - - key: proxmox_tags_parsed - separator: "" - prefix: group -groups: - webservers: "'web' in (proxmox_tags_parsed|list)" - mailservers: "'mail' in (proxmox_tags_parsed|list)" -compose: - ansible_port: 2222 - -# Using the inventory to allow ansible to connect via the first IP address of the VM / Container -# (Default is connection by name of QEMU/LXC guests) -# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory. 
-# my.proxmox.yml -plugin: community.general.proxmox -url: http://pve.domain.com:8006 -user: ansible@pve -password: secure -validate_certs: false -want_facts: true -compose: - ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address') - my_inv_var_1: "'my_var1_value'" - my_inv_var_2: > - "my_var_2_value" -''' - -import itertools -import re - -from ansible.module_utils.common._collections_compat import MutableMapping - -from ansible.errors import AnsibleError -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.utils.display import Display - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -# 3rd party imports -try: - import requests - if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): - raise ImportError - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - -display = Display() - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - ''' Host inventory parser for ansible using Proxmox as source. ''' - - NAME = 'community.general.proxmox' - - def __init__(self): - - super(InventoryModule, self).__init__() - - # from config - self.proxmox_url = None - - self.session = None - self.cache_key = None - self.use_cache = None - - def verify_file(self, path): - - valid = False - if super(InventoryModule, self).verify_file(path): - if path.endswith(('proxmox.yaml', 'proxmox.yml')): - valid = True - else: - self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"') - return valid - - def _get_session(self): - if not self.session: - self.session = requests.session() - self.session.verify = self.get_option('validate_certs') - return self.session - - def _get_auth(self): - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) - - a = self._get_session() - ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials) - - json = ret.json() - - self.credentials = { - 'ticket': json['data']['ticket'], - 'CSRFPreventionToken': json['data']['CSRFPreventionToken'], - } - - def _get_json(self, url, ignore_errors=None): - - if not self.use_cache or url not in self._cache.get(self.cache_key, {}): - - if self.cache_key not in self._cache: - self._cache[self.cache_key] = {'url': ''} - - data = [] - s = self._get_session() - while True: - headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])} - ret = s.get(url, headers=headers) - if ignore_errors and ret.status_code in ignore_errors: - break - ret.raise_for_status() - json = ret.json() - - # process results - # FIXME: This assumes 'return type' matches a specific query, - # it will break if we expand the queries and they dont have different types - if 'data' not in json: - # /hosts/:id does not have a 'data' key - data = json - break - elif isinstance(json['data'], MutableMapping): - # /facts are returned as dict in 'data' - data = json['data'] - break - else: - # /hosts 's 'results' is a list of all hosts, returned is paginated - data = data + json['data'] - break - - self._cache[self.cache_key][url] = data - - return self._cache[self.cache_key][url] - - def _get_nodes(self): - return self._get_json("%s/api2/json/nodes" % self.proxmox_url) - - def _get_pools(self): - return self._get_json("%s/api2/json/pools" % self.proxmox_url) - - def 
_get_lxc_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node)) - - def _get_qemu_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node)) - - def _get_members_per_pool(self, pool): - ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool)) - return ret['members'] - - def _get_node_ip(self, node): - ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node)) - - for iface in ret: - try: - return iface['address'] - except Exception: - return None - - def _get_agent_network_interfaces(self, node, vmid, vmtype): - result = [] - - try: - ifaces = self._get_json( - "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % ( - self.proxmox_url, node, vmtype, vmid - ) - )['result'] - - if "error" in ifaces: - if "class" in ifaces["error"]: - # This happens on Windows, even though qemu agent is running, the IP address - # cannot be fetched, as it's unsupported, also a command disabled can happen. - errorClass = ifaces["error"]["class"] - if errorClass in ["Unsupported"]: - self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported") - elif errorClass in ["CommandDisabled"]: - self.display.v("Retrieving network interfaces from guest agents has been disabled") - return result - - for iface in ifaces: - result.append({ - 'name': iface['name'], - 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '', - 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else [] - }) - except requests.HTTPError: - pass - - return result - - def _get_vm_config(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid)) - - properties[self._fact('node')] = node - properties[self._fact('vmid')] = vmid - properties[self._fact('vmtype')] = vmtype - - plaintext_configs = [ - 'description', - ] - - for config in ret: - key = self._fact(config) - value = ret[config] - try: - # fixup disk images as they have no key - if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')): - value = ('disk_image=' + value) - - # Additional field containing parsed tags as list - if config == 'tags': - parsed_key = self.to_safe('%s%s' % (key, "_parsed")) - properties[parsed_key] = [tag.strip() for tag in value.split(",")] - - # The first field in the agent string tells you whether the agent is enabled - # the rest of the comma separated string is extra config for the agent - if config == 'agent' and int(value.split(',')[0]): - agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype) - if agent_iface_value: - agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces")) - properties[agent_iface_key] = agent_iface_value - - if config not in plaintext_configs and not isinstance(value, int) and all("=" in v for v in value.split(",")): - # split off strings with commas to a dict - # skip over any keys that cannot be processed - try: - value = dict(key.split("=", 1) for key in value.split(",")) - except Exception: - continue - - properties[key] = value - except NameError: - return None - - def _get_vm_status(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid)) - properties[self._fact('status')] = ret['status'] - - def _get_vm_snapshots(self, 
properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid)) - snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current'] - properties[self._fact('snapshots')] = snapshots - - def to_safe(self, word): - '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups - #> ProxmoxInventory.to_safe("foo-bar baz") - 'foo_barbaz' - ''' - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", word.replace(" ", "")) - - def _fact(self, name): - '''Generate a fact's full name from the common prefix and a name.''' - return self.to_safe('%s%s' % (self.facts_prefix, name.lower())) - - def _group(self, name): - '''Generate a group's full name from the common prefix and a name.''' - return self.to_safe('%s%s' % (self.group_prefix, name.lower())) - - def _can_add_host(self, name, properties): - '''Ensure that a host satisfies all defined hosts filters. If strict mode is - enabled, any error during host filter compositing will lead to an AnsibleError - being raised, otherwise the filter will be ignored. - ''' - for host_filter in self.host_filters: - try: - if not self._compose(host_filter, properties): - return False - except Exception as e: # pylint: disable=broad-except - message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e)) - if self.strict: - raise AnsibleError(message) - display.warning(message) - return True - - def _add_host(self, name, variables): - self.inventory.add_host(name) - for k, v in variables.items(): - self.inventory.set_variable(name, k, v) - variables = self.inventory.get_host(name).get_vars() - self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict) - self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict) - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict) - - def _handle_item(self, node, ittype, item): - '''Handle an item from the list of LXC containers and Qemu VM. 
The - return value will be either None if the item was skipped or the name of - the item if it was added to the inventory.''' - if item.get('template'): - return None - - properties = dict() - name, vmid = item['name'], item['vmid'] - - # get status, config and snapshots if want_facts == True - if self.get_option('want_facts'): - self._get_vm_status(properties, node, vmid, ittype, name) - self._get_vm_config(properties, node, vmid, ittype, name) - self._get_vm_snapshots(properties, node, vmid, ittype, name) - - # ensure the host satisfies filters - if not self._can_add_host(name, properties): - return None - - # add the host to the inventory - self._add_host(name, properties) - node_type_group = self._group('%s_%s' % (node, ittype)) - self.inventory.add_child(self._group('all_' + ittype), name) - self.inventory.add_child(node_type_group, name) - if item['status'] == 'stopped': - self.inventory.add_child(self._group('all_stopped'), name) - elif item['status'] == 'running': - self.inventory.add_child(self._group('all_running'), name) - - return name - - def _populate_pool_groups(self, added_hosts): - '''Generate groups from Proxmox resource pools, ignoring VMs and - containers that were skipped.''' - for pool in self._get_pools(): - poolid = pool.get('poolid') - if not poolid: - continue - pool_group = self._group('pool_' + poolid) - self.inventory.add_group(pool_group) - - for member in self._get_members_per_pool(poolid): - name = member.get('name') - if name and name in added_hosts: - self.inventory.add_child(pool_group, name) - - def _populate(self): - - # create common groups - self.inventory.add_group(self._group('all_lxc')) - self.inventory.add_group(self._group('all_qemu')) - self.inventory.add_group(self._group('all_running')) - self.inventory.add_group(self._group('all_stopped')) - nodes_group = self._group('nodes') - self.inventory.add_group(nodes_group) - - # gather vm's on nodes - self._get_auth() - hosts = [] - for node in self._get_nodes(): - if not node.get('node'): - continue - - self.inventory.add_host(node['node']) - if node['type'] == 'node': - self.inventory.add_child(nodes_group, node['node']) - - if node['status'] == 'offline': - continue - - # get node IP address - if self.get_option("want_proxmox_nodes_ansible_host"): - ip = self._get_node_ip(node['node']) - self.inventory.set_variable(node['node'], 'ansible_host', ip) - - # add LXC/Qemu groups for the node - for ittype in ('lxc', 'qemu'): - node_type_group = self._group('%s_%s' % (node['node'], ittype)) - self.inventory.add_group(node_type_group) - - # get LXC containers and Qemu VMs for this node - lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node'])) - qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node'])) - for ittype, item in itertools.chain(lxc_objects, qemu_objects): - name = self._handle_item(node['node'], ittype, item) - if name is not None: - hosts.append(name) - - # gather vm's in pools - self._populate_pool_groups(hosts) - - def parse(self, inventory, loader, path, cache=True): - if not HAS_REQUESTS: - raise AnsibleError('This module requires Python Requests 1.1.0 or higher: ' - 'https://github.com/psf/requests.') - - super(InventoryModule, self).parse(inventory, loader, path) - - # read config from file, this sets 'options' - self._read_config_data(path) - - # read options - self.proxmox_url = self.get_option('url').rstrip('/') - self.proxmox_user = self.get_option('user') - self.proxmox_password = self.get_option('password') - self.cache_key = 
self.get_cache_key(path) - self.use_cache = cache and self.get_option('cache') - self.host_filters = self.get_option('filters') - self.group_prefix = self.get_option('group_prefix') - self.facts_prefix = self.get_option('facts_prefix') - self.strict = self.get_option('strict') - - # actually populate inventory - self._populate() diff --git a/ansible_collections/community/general/plugins/inventory/scaleway.py b/ansible_collections/community/general/plugins/inventory/scaleway.py deleted file mode 100644 index d48cc97a..00000000 --- a/ansible_collections/community/general/plugins/inventory/scaleway.py +++ /dev/null @@ -1,343 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = r''' - name: scaleway - author: - - Remy Leone (@remyleone) - short_description: Scaleway inventory source - description: - - Get inventory hosts from Scaleway. - requirements: - - PyYAML - options: - plugin: - description: Token that ensures this is a source file for the 'scaleway' plugin. - required: True - choices: ['scaleway', 'community.general.scaleway'] - regions: - description: Filter results on a specific Scaleway region. - type: list - elements: string - default: - - ams1 - - par1 - - par2 - - waw1 - tags: - description: Filter results on a specific tag. - type: list - elements: string - scw_profile: - description: - - The config profile to use in config file. - - By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined. - type: string - version_added: 4.4.0 - oauth_token: - description: - - Scaleway OAuth token. - - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file - (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). - - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). - env: - # in order of precedence - - name: SCW_TOKEN - - name: SCW_API_KEY - - name: SCW_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. - type: list - elements: string - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - public_ipv6 - - hostname - - id - variables: - description: 'Set individual variables: keys are variable names and - values are templates. Any value returned by the - L(Scaleway API, https://developer.scaleway.com/#servers-server-get) - can be used.' 
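A few lines further down, the plugin resolves the hostnames preference list as a first-match chain of per-field extractors (see _filter_host below). A standalone sketch of that selection logic; the extractor table and sample server dict here are illustrative, not the plugin's exact code:

# Sketch: first-match hostname selection over extractor preferences.
def pick_hostname(server, preferences):
    extractors = {
        'public_ipv4': lambda s: (s.get('public_ip') or {}).get('address'),
        'private_ipv4': lambda s: s.get('private_ip'),
        'hostname': lambda s: s.get('hostname'),
        'id': lambda s: s.get('id'),
    }
    for pref in preferences:
        value = extractors[pref](server)
        if value:  # first extractor that yields a value wins
            return value
    return None  # no usable name: the host is skipped by the inventory

server = {'hostname': 'web-1', 'public_ip': None, 'private_ip': '10.0.0.5'}
print(pick_hostname(server, ['public_ipv4', 'private_ipv4', 'hostname']))  # 10.0.0.5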
- type: dict -''' - -EXAMPLES = r''' -# scaleway_inventory.yml file in YAML format -# Example command line: ansible-inventory --list -i scaleway_inventory.yml - -# use hostname as inventory_hostname -# use the private IP address to connect to the host -plugin: community.general.scaleway -regions: - - ams1 - - par1 -tags: - - foobar -hostnames: - - hostname -variables: - ansible_host: private_ip - state: state - -# use hostname as inventory_hostname and public IP address to connect to the host -plugin: community.general.scaleway -hostnames: - - hostname -regions: - - par1 -variables: - ansible_host: public_ip.address - -# Using static strings as variables -plugin: community.general.scaleway -hostnames: - - hostname -variables: - ansible_host: public_ip.address - ansible_connection: "'ssh'" - ansible_user: "'admin'" -''' - -import os -import json - -try: - import yaml -except ImportError as exc: - YAML_IMPORT_ERROR = exc -else: - YAML_IMPORT_ERROR = None - -from ansible.errors import AnsibleError -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link -from ansible.module_utils.urls import open_url -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.six import raise_from - -import ansible.module_utils.six.moves.urllib.parse as urllib_parse - - -def _fetch_information(token, url): - results = [] - paginated_url = url - while True: - try: - response = open_url(paginated_url, - headers={'X-Auth-Token': token, - 'Content-type': 'application/json'}) - except Exception as e: - raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e))) - try: - raw_json = json.loads(to_text(response.read())) - except ValueError: - raise AnsibleError("Incorrect JSON payload") - - try: - results.extend(raw_json["servers"]) - except KeyError: - raise AnsibleError("Incorrect format from the Scaleway API response") - - link = response.headers['Link'] - if not link: - return results - relations = parse_pagination_link(link) - if 'next' not in relations: - return results - paginated_url = urllib_parse.urljoin(paginated_url, relations['next']) - - -def _build_server_url(api_endpoint): - return "/".join([api_endpoint, "servers"]) - - -def extract_public_ipv4(server_info): - try: - return server_info["public_ip"]["address"] - except (KeyError, TypeError): - return None - - -def extract_private_ipv4(server_info): - try: - return server_info["private_ip"] - except (KeyError, TypeError): - return None - - -def extract_hostname(server_info): - try: - return server_info["hostname"] - except (KeyError, TypeError): - return None - - -def extract_server_id(server_info): - try: - return server_info["id"] - except (KeyError, TypeError): - return None - - -def extract_public_ipv6(server_info): - try: - return server_info["ipv6"]["address"] - except (KeyError, TypeError): - return None - - -def extract_tags(server_info): - try: - return server_info["tags"] - except (KeyError, TypeError): - return None - - -def extract_zone(server_info): - try: - return server_info["location"]["zone_id"] - except (KeyError, TypeError): - return None - - -extractors = { - "public_ipv4": extract_public_ipv4, - "private_ipv4": extract_private_ipv4, - "public_ipv6": extract_public_ipv6, - "hostname": extract_hostname, - "id": extract_server_id -} - - -class InventoryModule(BaseInventoryPlugin, Constructable): - NAME = 'community.general.scaleway' - - def 
_fill_host_variables(self, host, server_info): - targeted_attributes = ( - "arch", - "commercial_type", - "id", - "organization", - "state", - "hostname", - ) - for attribute in targeted_attributes: - self.inventory.set_variable(host, attribute, server_info[attribute]) - - self.inventory.set_variable(host, "tags", server_info["tags"]) - - if extract_public_ipv6(server_info=server_info): - self.inventory.set_variable(host, "public_ipv6", extract_public_ipv6(server_info=server_info)) - - if extract_public_ipv4(server_info=server_info): - self.inventory.set_variable(host, "public_ipv4", extract_public_ipv4(server_info=server_info)) - - if extract_private_ipv4(server_info=server_info): - self.inventory.set_variable(host, "private_ipv4", extract_private_ipv4(server_info=server_info)) - - def _get_zones(self, config_zones): - return set(SCALEWAY_LOCATION.keys()).intersection(config_zones) - - def match_groups(self, server_info, tags): - server_zone = extract_zone(server_info=server_info) - server_tags = extract_tags(server_info=server_info) - - # If a server does not have a zone, it means it is archived - if server_zone is None: - return set() - - # If no filtering is defined, all tags are valid groups - if tags is None: - return set(server_tags).union((server_zone,)) - - matching_tags = set(server_tags).intersection(tags) - - if not matching_tags: - return set() - return matching_tags.union((server_zone,)) - - def _filter_host(self, host_infos, hostname_preferences): - - for pref in hostname_preferences: - if extractors[pref](host_infos): - return extractors[pref](host_infos) - - return None - - def do_zone_inventory(self, zone, token, tags, hostname_preferences): - self.inventory.add_group(zone) - zone_info = SCALEWAY_LOCATION[zone] - - url = _build_server_url(zone_info["api_endpoint"]) - raw_zone_hosts_infos = _fetch_information(url=url, token=token) - - for host_infos in raw_zone_hosts_infos: - - hostname = self._filter_host(host_infos=host_infos, - hostname_preferences=hostname_preferences) - - # No suitable hostname were found in the attributes and the host won't be in the inventory - if not hostname: - continue - - groups = self.match_groups(host_infos, tags) - - for group in groups: - self.inventory.add_group(group=group) - self.inventory.add_host(group=group, host=hostname) - self._fill_host_variables(host=hostname, server_info=host_infos) - - # Composed variables - self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False) - - def get_oauth_token(self): - oauth_token = self.get_option('oauth_token') - - if 'SCW_CONFIG_PATH' in os.environ: - scw_config_path = os.getenv('SCW_CONFIG_PATH') - elif 'XDG_CONFIG_HOME' in os.environ: - scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml') - else: - scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml') - - if not oauth_token and os.path.exists(scw_config_path): - with open(scw_config_path) as fh: - scw_config = yaml.safe_load(fh) - ansible_profile = self.get_option('scw_profile') - - if ansible_profile: - active_profile = ansible_profile - else: - active_profile = scw_config.get('active_profile', 'default') - - if active_profile == 'default': - oauth_token = scw_config.get('secret_key') - else: - oauth_token = scw_config['profiles'][active_profile].get('secret_key') - - return oauth_token - - def parse(self, inventory, loader, path, cache=True): - if YAML_IMPORT_ERROR: - raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR) - 
super(InventoryModule, self).parse(inventory, loader, path) - self._read_config_data(path=path) - - config_zones = self.get_option("regions") - tags = self.get_option("tags") - token = self.get_oauth_token() - if not token: - raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.") - hostname_preference = self.get_option("hostnames") - - for zone in self._get_zones(config_zones): - self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference) diff --git a/ansible_collections/community/general/plugins/lookup/cartesian.py b/ansible_collections/community/general/plugins/lookup/cartesian.py deleted file mode 100644 index 98043eba..00000000 --- a/ansible_collections/community/general/plugins/lookup/cartesian.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2013, Bradley Young -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cartesian - short_description: returns the cartesian product of lists - description: - - Takes the input lists and returns a list that represents the product of the input lists. - - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. - You can see the exact syntax in the examples section. - options: - _raw: - description: - - a set of lists - required: True -''' - -EXAMPLES = """ -- name: Example of the change in the description - ansible.builtin.debug: - msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}" - -- name: loops over the cartesian product of the supplied lists - ansible.builtin.debug: - msg: "{{item}}" - with_community.general.cartesian: - - "{{list1}}" - - "{{list2}}" - - [1,2,3,4,5,6] -""" - -RETURN = """ - _list: - description: - - list of lists composed of elements of the input lists - type: list - elements: list -""" - -from itertools import product - -from ansible.errors import AnsibleError -from ansible.plugins.lookup import LookupBase -from ansible.utils.listify import listify_lookup_plugin_terms - - -class LookupModule(LookupBase): - """ - Create the cartesian product of lists - """ - - def _lookup_variables(self, terms): - """ - Turn this: - terms == ["1,2,3", "a,b"] - into this: - terms == [[1,2,3], [a, b]] - """ - results = [] - for x in terms: - intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader) - results.append(intermediate) - return results - - def run(self, terms, variables=None, **kwargs): - - terms = self._lookup_variables(terms) - - my_list = terms[:] - if len(my_list) == 0: - raise AnsibleError("with_cartesian requires at least one element in each list") - - return [self._flatten(x) for x in product(*my_list)] diff --git a/ansible_collections/community/general/plugins/lookup/consul_kv.py b/ansible_collections/community/general/plugins/lookup/consul_kv.py deleted file mode 100644 index 3ad03bfe..00000000 --- a/ansible_collections/community/general/plugins/lookup/consul_kv.py +++ /dev/null @@ -1,193 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Steve Gargan -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' - 
author: Unknown (!UNKNOWN) - name: consul_kv - short_description: Fetch metadata from a Consul key value store. - description: - - Lookup metadata for a playbook from the key value store in a Consul cluster. - Values can be easily set in the kv store with simple rest commands - - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata) - requirements: - - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)' - options: - _raw: - description: List of key(s) to retrieve. - type: list - elements: string - recurse: - type: boolean - description: If true, will retrieve all the values that have the given key as prefix. - default: False - index: - description: - - If the key has a value with the specified index then this is returned allowing access to historical values. - datacenter: - description: - - Retrieve the key from a consul datacenter other than the default for the consul host. - token: - description: The acl token to allow access to restricted values. - host: - default: localhost - description: - - The target to connect to, must be a resolvable address. - Will be determined from C(ANSIBLE_CONSUL_URL) if that is set. - - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)" - env: - - name: ANSIBLE_CONSUL_URL - ini: - - section: lookup_consul - key: host - port: - description: - - The port of the target host to connect to. - - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there. - default: 8500 - scheme: - default: http - description: - - Whether to use http or https. - - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there. - validate_certs: - default: True - description: Whether to verify the ssl connection or not. - env: - - name: ANSIBLE_CONSUL_VALIDATE_CERTS - ini: - - section: lookup_consul - key: validate_certs - client_cert: - description: The client cert to verify the ssl connection. - env: - - name: ANSIBLE_CONSUL_CLIENT_CERT - ini: - - section: lookup_consul - key: client_cert - url: - description: "The target to connect to, should look like this: C(https://my.consul.server:8500)." - type: str - version_added: 1.0.0 - env: - - name: ANSIBLE_CONSUL_URL - ini: - - section: lookup_consul - key: url -''' - -EXAMPLES = """ - - ansible.builtin.debug: - msg: 'key contains {{item}}' - with_community.general.consul_kv: - - 'key/to/retrieve' - - - name: Parameters can be provided after the key be more specific about what to retrieve - ansible.builtin.debug: - msg: 'key contains {{item}}' - with_community.general.consul_kv: - - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98' - - - name: retrieving a KV from a remote cluster on non default port - ansible.builtin.debug: - msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}" -""" - -RETURN = """ - _raw: - description: - - Value(s) stored in consul. - type: dict -""" - -import os -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.errors import AnsibleError, AnsibleAssertionError -from ansible.plugins.lookup import LookupBase -from ansible.module_utils.common.text.converters import to_text - -try: - import consul - - HAS_CONSUL = True -except ImportError as e: - HAS_CONSUL = False - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - - if not HAS_CONSUL: - raise AnsibleError( - 'python-consul is required for consul_kv lookup. 
see http://python-consul.readthedocs.org/en/latest/#installation') - - # get options - self.set_options(direct=kwargs) - - scheme = self.get_option('scheme') - host = self.get_option('host') - port = self.get_option('port') - url = self.get_option('url') - if url is not None: - u = urlparse(url) - if u.scheme: - scheme = u.scheme - host = u.hostname - if u.port is not None: - port = u.port - - validate_certs = self.get_option('validate_certs') - client_cert = self.get_option('client_cert') - - values = [] - try: - for term in terms: - params = self.parse_params(term) - consul_api = consul.Consul(host=host, port=port, scheme=scheme, verify=validate_certs, cert=client_cert) - - results = consul_api.kv.get(params['key'], - token=params['token'], - index=params['index'], - recurse=params['recurse'], - dc=params['datacenter']) - if results[1]: - # responds with a single or list of result maps - if isinstance(results[1], list): - for r in results[1]: - values.append(to_text(r['Value'])) - else: - values.append(to_text(results[1]['Value'])) - except Exception as e: - raise AnsibleError( - "Error locating '%s' in kv store. Error was %s" % (term, e)) - - return values - - def parse_params(self, term): - params = term.split(' ') - - paramvals = { - 'key': params[0], - 'token': self.get_option('token'), - 'recurse': self.get_option('recurse'), - 'index': self.get_option('index'), - 'datacenter': self.get_option('datacenter') - } - - # parameters specified? - try: - for param in params[1:]: - if param and len(param) > 0: - name, value = param.split('=') - if name not in paramvals: - raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name) - paramvals[name] = value - except (ValueError, AssertionError) as e: - raise AnsibleError(e) - - return paramvals diff --git a/ansible_collections/community/general/plugins/lookup/credstash.py b/ansible_collections/community/general/plugins/lookup/credstash.py deleted file mode 100644 index 143c66c1..00000000 --- a/ansible_collections/community/general/plugins/lookup/credstash.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Ensighten -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: credstash - short_description: retrieve secrets from Credstash on AWS - requirements: - - credstash (python library) - description: - - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash" - options: - _terms: - description: term or list of terms to lookup in the credit store - type: list - elements: string - required: true - table: - description: name of the credstash table to query - default: 'credential-store' - version: - description: Credstash version - region: - description: AWS region - profile_name: - description: AWS profile to use for authentication - env: - - name: AWS_PROFILE - aws_access_key_id: - description: AWS access key ID - env: - - name: AWS_ACCESS_KEY_ID - aws_secret_access_key: - description: AWS access key - env: - - name: AWS_SECRET_ACCESS_KEY - aws_session_token: - description: AWS session token - env: - - name: AWS_SESSION_TOKEN -''' - -EXAMPLES = """ -- name: first use credstash to store your secrets - ansible.builtin.shell: credstash put my-github-password secure123 - -- name: "Test credstash lookup plugin -- get my github password" - 
ansible.builtin.debug: - msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-github-password') }}" - -- name: "Test credstash lookup plugin -- get my other password from us-west-1" - ansible.builtin.debug: - msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-other-password', region='us-west-1') }}" - -- name: "Test credstash lookup plugin -- get the company's github password" - ansible.builtin.debug: - msg: "Credstash lookup! {{ lookup('community.general.credstash', 'company-github-password', table='company-passwords') }}" - -- name: Example play using the 'context' feature - hosts: localhost - vars: - context: - app: my_app - environment: production - tasks: - - - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" - - - name: "Test credstash lookup plugin -- get the password with a context defined here" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" -""" - -RETURN = """ - _raw: - description: - - Value(s) stored in Credstash. - type: str -""" - -import os - -from ansible.errors import AnsibleError -from ansible.plugins.lookup import LookupBase - -CREDSTASH_INSTALLED = False - -try: - import credstash - CREDSTASH_INSTALLED = True -except ImportError: - CREDSTASH_INSTALLED = False - - -class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - - if not CREDSTASH_INSTALLED: - raise AnsibleError('The credstash lookup plugin requires credstash to be installed.') - - ret = [] - for term in terms: - try: - version = kwargs.pop('version', '') - region = kwargs.pop('region', None) - table = kwargs.pop('table', 'credential-store') - profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None)) - aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None)) - aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None)) - aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None)) - kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id, - 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token} - val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass) - except credstash.ItemNotFound: - raise AnsibleError('Key {0} not found'.format(term)) - except Exception as e: - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) - ret.append(val) - - return ret diff --git a/ansible_collections/community/general/plugins/lookup/dig.py b/ansible_collections/community/general/plugins/lookup/dig.py deleted file mode 100644 index 19ded61d..00000000 --- a/ansible_collections/community/general/plugins/lookup/dig.py +++ /dev/null @@ -1,368 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Jan-Piet Mens -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: dig - author: Jan-Piet Mens (@jpmens) - short_description: query DNS using the dnspython library - requirements: - - dnspython (python library, http://www.dnspython.org/) - description: - - The dig lookup runs queries against DNS servers to retrieve DNS 
records for a specific name (FQDN - fully qualified domain name). - It is possible to lookup any DNS record in this manner. - - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. - It is also possible to explicitly specify the DNS server(s) to use for lookups. - - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN - - In addition to (default) A record, it is also possible to specify a different record type that should be queried. - This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried. - - If multiple values are associated with the requested record, the results will be returned as a comma-separated list. - In such cases you may want to pass option wantlist=True to the plugin, which will result in the record values being returned as a list - over which you can iterate later on. - - By default, the lookup will rely on system-wide configured DNS servers for performing the query. - It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. - This needs to be passed-in as an additional parameter to the lookup - options: - _terms: - description: domain(s) to query - qtype: - description: record type to query - default: 'A' - choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT] - flat: - description: If 0 each record is returned as a dictionary, otherwise a string - default: 1 - retry_servfail: - description: Retry a nameserver if it returns SERVFAIL. - default: false - type: bool - version_added: 3.6.0 - notes: - - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. - - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. - Syntax for specifying the record type is shown in the examples below. - - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake. -''' - -EXAMPLES = """ -- name: Simple A record (IPV4 address) lookup for example.com - ansible.builtin.debug: - msg: "{{ lookup('community.general.dig', 'example.com.')}}" - -- name: "The TXT record for example.org." - ansible.builtin.debug: - msg: "{{ lookup('community.general.dig', 'example.org.', 'qtype=TXT') }}" - -- name: "The TXT record for example.org, alternative syntax." - ansible.builtin.debug: - msg: "{{ lookup('community.general.dig', 'example.org./TXT') }}" - -- name: use in a loop - ansible.builtin.debug: - msg: "MX record for gmail.com {{ item }}" - with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=True) }}" - -- ansible.builtin.debug: - msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}" -- ansible.builtin.debug: - msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}" -- ansible.builtin.debug: - msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}" -- ansible.builtin.debug: - msg: "Querying 198.51.100.23 for IPv4 address for example.com. 
produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}" - -- ansible.builtin.debug: - msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}" - with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}" - -- name: Retry nameservers that return SERVFAIL - ansible.builtin.debug: - msg: "{{ lookup('community.general.dig', 'example.org./A', 'retry_servfail=True') }}" -""" - -RETURN = """ - _list: - description: - - List of composed strings or dictionaries with key and value - If a dictionary, fields shows the keys returned depending on query type - type: list - elements: raw - contains: - ALL: - description: - - owner, ttl, type - A: - description: - - address - AAAA: - description: - - address - CNAME: - description: - - target - DNAME: - description: - - target - DLV: - description: - - algorithm, digest_type, key_tag, digest - DNSKEY: - description: - - flags, algorithm, protocol, key - DS: - description: - - algorithm, digest_type, key_tag, digest - HINFO: - description: - - cpu, os - LOC: - description: - - latitude, longitude, altitude, size, horizontal_precision, vertical_precision - MX: - description: - - preference, exchange - NAPTR: - description: - - order, preference, flags, service, regexp, replacement - NS: - description: - - target - NSEC3PARAM: - description: - - algorithm, flags, iterations, salt - PTR: - description: - - target - RP: - description: - - mbox, txt - SOA: - description: - - mname, rname, serial, refresh, retry, expire, minimum - SPF: - description: - - strings - SRV: - description: - - priority, weight, port, target - SSHFP: - description: - - algorithm, fp_type, fingerprint - TLSA: - description: - - usage, selector, mtype, cert - TXT: - description: - - strings -""" - -from ansible.errors import AnsibleError -from ansible.plugins.lookup import LookupBase -from ansible.module_utils.common.text.converters import to_native -import socket - -try: - import dns.exception - import dns.name - import dns.resolver - import dns.reversename - import dns.rdataclass - from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC, - MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT) - HAVE_DNS = True -except ImportError: - HAVE_DNS = False - - -def make_rdata_dict(rdata): - ''' While the 'dig' lookup plugin supports anything which dnspython supports - out of the box, the following supported_types list describes which - DNS query types we can convert to a dict. - - Note: adding support for RRSIG is hard work. 
:)
-    '''
-    supported_types = {
-        A: ['address'],
-        AAAA: ['address'],
-        CNAME: ['target'],
-        DNAME: ['target'],
-        DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'],
-        DNSKEY: ['flags', 'algorithm', 'protocol', 'key'],
-        DS: ['algorithm', 'digest_type', 'key_tag', 'digest'],
-        HINFO: ['cpu', 'os'],
-        LOC: ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
-        MX: ['preference', 'exchange'],
-        NAPTR: ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
-        NS: ['target'],
-        NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'],
-        PTR: ['target'],
-        RP: ['mbox', 'txt'],
-        # RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
-        SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
-        SPF: ['strings'],
-        SRV: ['priority', 'weight', 'port', 'target'],
-        SSHFP: ['algorithm', 'fp_type', 'fingerprint'],
-        TLSA: ['usage', 'selector', 'mtype', 'cert'],
-        TXT: ['strings'],
-    }
-
-    rd = {}
-
-    if rdata.rdtype in supported_types:
-        fields = supported_types[rdata.rdtype]
-        for f in fields:
-            val = rdata.__getattribute__(f)
-
-            if isinstance(val, dns.name.Name):
-                val = dns.name.Name.to_text(val)
-
-            if rdata.rdtype == DLV and f == 'digest':
-                val = dns.rdata._hexify(rdata.digest).replace(' ', '')
-            if rdata.rdtype == DS and f == 'digest':
-                val = dns.rdata._hexify(rdata.digest).replace(' ', '')
-            if rdata.rdtype == DNSKEY and f == 'key':
-                val = dns.rdata._base64ify(rdata.key).replace(' ', '')
-            if rdata.rdtype == NSEC3PARAM and f == 'salt':
-                val = dns.rdata._hexify(rdata.salt).replace(' ', '')
-            if rdata.rdtype == SSHFP and f == 'fingerprint':
-                val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
-            if rdata.rdtype == TLSA and f == 'cert':
-                val = dns.rdata._hexify(rdata.cert).replace(' ', '')
-
-            rd[f] = val
-
-    return rd
-
-
-# ==============================================================
-# dig: Lookup DNS records
-#
-# --------------------------------------------------------------
-
-class LookupModule(LookupBase):
-
-    def run(self, terms, variables=None, **kwargs):
-
-        '''
-        terms contains a string with things to `dig' for. We support the
-        following formats:
-            example.com                                     # A record
-            example.com  qtype=A                            # same
-            example.com/TXT                                 # specific qtype
-            example.com  qtype=txt                          # same
-            192.0.2.23/PTR                                  # reverse PTR
-               ^^ shortcut for 23.2.0.192.in-addr.arpa/PTR
-            example.net/AAAA  @nameserver                   # query specified server
-                               ^^^ can be comma-sep list of names/addresses
-
-            ... flat=0                                      # returns a dict; default is 1 == string
-        '''
-
-        if HAVE_DNS is False:
-            raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed")
-
-        # Create Resolver object so that we can set NS if necessary
-        myres = dns.resolver.Resolver(configure=True)
-        edns_size = 4096
-        myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
-
-        domain = None
-        qtype = 'A'
-        flat = True
-        rdclass = dns.rdataclass.from_text('IN')
-
-        for t in terms:
-            if t.startswith('@'):       # e.g. "@10.0.1.2,192.0.2.1" is ok.
-                nsset = t[1:].split(',')
-                # initialize once, before the loop, so every resolved
-                # nameserver is kept rather than only the last one
-                nameservers = []
-                for ns in nsset:
-                    # Check if we have a valid IP address. If so, use that, otherwise
-                    # try to resolve name to address using system's resolver. If that
-                    # fails we bail out.
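                    # For example, for "@192.0.2.53,ns1.example.org" the literal
                    # address passes socket.inet_aton() unchanged, while the name
                    # is resolved first and its A record address is used instead.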
- try: - socket.inet_aton(ns) - nameservers.append(ns) - except Exception: - try: - nsaddr = dns.resolver.query(ns)[0].address - nameservers.append(nsaddr) - except Exception as e: - raise AnsibleError("dns lookup NS: %s" % to_native(e)) - myres.nameservers = nameservers - continue - if '=' in t: - try: - opt, arg = t.split('=') - except Exception: - pass - - if opt == 'qtype': - qtype = arg.upper() - elif opt == 'flat': - flat = int(arg) - elif opt == 'class': - try: - rdclass = dns.rdataclass.from_text(arg) - except Exception as e: - raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) - elif opt == 'retry_servfail': - myres.retry_servfail = bool(arg) - - continue - - if '/' in t: - try: - domain, qtype = t.split('/') - except Exception: - domain = t - else: - domain = t - - # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) - - ret = [] - - if qtype.upper() == 'PTR': - try: - n = dns.reversename.from_address(domain) - domain = n.to_text() - except dns.exception.SyntaxError: - pass - except Exception as e: - raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e)) - - try: - answers = myres.query(domain, qtype, rdclass=rdclass) - for rdata in answers: - s = rdata.to_text() - if qtype.upper() == 'TXT': - s = s[1:-1] # Strip outside quotes on TXT rdata - - if flat: - ret.append(s) - else: - try: - rd = make_rdata_dict(rdata) - rd['owner'] = answers.canonical_name.to_text() - rd['type'] = dns.rdatatype.to_text(rdata.rdtype) - rd['ttl'] = answers.rrset.ttl - rd['class'] = dns.rdataclass.to_text(rdata.rdclass) - - ret.append(rd) - except Exception as e: - ret.append(str(e)) - - except dns.resolver.NXDOMAIN: - ret.append('NXDOMAIN') - except dns.resolver.NoAnswer: - ret.append("") - except dns.resolver.Timeout: - ret.append('') - except dns.exception.DNSException as e: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e)) - - return ret diff --git a/ansible_collections/community/general/plugins/lookup/etcd3.py b/ansible_collections/community/general/plugins/lookup/etcd3.py deleted file mode 100644 index a34fae7b..00000000 --- a/ansible_collections/community/general/plugins/lookup/etcd3.py +++ /dev/null @@ -1,229 +0,0 @@ -# -*- coding: utf-8 -*- -# -# (c) 2020, SCC France, Eric Belhomme -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' - author: - - Eric Belhomme (@eric-belhomme) - version_added: '0.2.0' - name: etcd3 - short_description: Get key values from etcd3 server - description: - - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. - - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some C(ETCDCTL_*) environment variables. - - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. - - options: - _terms: - description: - - The list of keys (or key prefixes) to look up on the etcd3 server. - type: list - elements: str - required: True - prefix: - description: - - Look for key or prefix key. - type: bool - default: False - endpoints: - description: - - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable. - Specify the etcd3 connection with and URL form eg. C(https://hostname:2379) or C(:) form. - - The C(host) part is overwritten by I(host) option, if defined. - - The C(port) part is overwritten by I(port) option, if defined. 
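In run() further down, this endpoint string is split into host and port with a regular expression before being handed to etcd3.client(). A standalone sketch of equivalent parsing; the pattern mirrors the one in the plugin, while the sample endpoint is hypothetical:

# Sketch: split an ETCDCTL_ENDPOINTS-style value into host/port kwargs.
import re

ENDPOINT_RE = re.compile(
    r'^(https?://)?(?P<host>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P<port>\d{1,5}))?/?$'
)

def parse_endpoint(endpoint):
    client_params = {}
    match = ENDPOINT_RE.match(endpoint)
    if match:
        if match.group('host'):
            client_params['host'] = match.group('host')
        if match.group('port'):
            client_params['port'] = match.group('port')
    return client_params

print(parse_endpoint('https://etcd.example.com:2379'))  # {'host': 'etcd.example.com', 'port': '2379'}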
- env: - - name: ETCDCTL_ENDPOINTS - default: '127.0.0.1:2379' - type: str - host: - description: - - etcd3 listening client host. - - Takes precedence over I(endpoints). - type: str - port: - description: - - etcd3 listening client port. - - Takes precedence over I(endpoints). - type: int - ca_cert: - description: - - etcd3 CA authority. - env: - - name: ETCDCTL_CACERT - type: str - cert_cert: - description: - - etcd3 client certificate. - env: - - name: ETCDCTL_CERT - type: str - cert_key: - description: - - etcd3 client private key. - env: - - name: ETCDCTL_KEY - type: str - timeout: - description: - - Client timeout. - default: 60 - env: - - name: ETCDCTL_DIAL_TIMEOUT - type: int - user: - description: - - Authenticated user name. - env: - - name: ETCDCTL_USER - type: str - password: - description: - - Authenticated user password. - env: - - name: ETCDCTL_PASSWORD - type: str - - notes: - - I(host) and I(port) options take precedence over (endpoints) option. - - The recommended way to connect to etcd3 server is using C(ETCDCTL_ENDPOINT) - environment variable and keep I(endpoints), I(host), and I(port) unused. - seealso: - - module: community.general.etcd3 - - ref: ansible_collections.community.general.etcd_lookup - description: The etcd v2 lookup. - - requirements: - - "etcd3 >= 0.10" -''' - -EXAMPLES = ''' -- name: "a value from a locally running etcd" - ansible.builtin.debug: - msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}" - -- name: "values from multiple folders on a locally running etcd" - ansible.builtin.debug: - msg: "{{ lookup('community.general.etcd3', 'foo', 'bar', 'baz') }}" - -- name: "look for a key prefix" - ansible.builtin.debug: - msg: "{{ lookup('community.general.etcd3', '/foo/bar', prefix=True) }}" - -- name: "connect to etcd3 with a client certificate" - ansible.builtin.debug: - msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}" -''' - -RETURN = ''' - _raw: - description: - - List of keys and associated values. - type: list - elements: dict - contains: - key: - description: The element's key. - type: str - value: - description: The element's value. - type: str -''' - -import re - -from ansible.plugins.lookup import LookupBase -from ansible.utils.display import Display -from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible.plugins.lookup import LookupBase -from ansible.errors import AnsibleError, AnsibleLookupError - -try: - import etcd3 - HAS_ETCD = True -except ImportError: - HAS_ETCD = False - -display = Display() - -etcd3_cnx_opts = ( - 'host', - 'port', - 'ca_cert', - 'cert_key', - 'cert_cert', - 'timeout', - 'user', - 'password', - # 'grpc_options' Etcd3Client() option currently not supported by lookup module (maybe in future ?) 
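    # Every option named above is copied verbatim into the etcd3.client()
    # keyword arguments by run() below whenever the option is set.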
-)
-
-
-def etcd3_client(client_params):
-    try:
-        etcd = etcd3.client(**client_params)
-        etcd.status()
-    except Exception as exp:
-        raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp)))
-    return etcd
-
-
-class LookupModule(LookupBase):
-
-    def run(self, terms, variables, **kwargs):
-
-        self.set_options(var_options=variables, direct=kwargs)
-
-        if not HAS_ETCD:
-            display.error(missing_required_lib('etcd3'))
-            return None
-
-        # create the etcd3 connection parameters dict to pass to etcd3 class
-        client_params = {}
-
-        # etcd3 class expects host and port as connection parameters, so endpoints
-        # must be mangled a bit to fit in this scheme.
-        # so here we use a regex to extract server and port
-        match = re.compile(
-            r'^(https?://)?(?P<host>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P<port>\d{1,5}))?/?$'
-        ).match(self.get_option('endpoints'))
-        if match:
-            if match.group('host'):
-                client_params['host'] = match.group('host')
-            if match.group('port'):
-                client_params['port'] = match.group('port')
-
-        for opt in etcd3_cnx_opts:
-            if self.get_option(opt):
-                client_params[opt] = self.get_option(opt)
-
-        cnx_log = dict(client_params)
-        if 'password' in cnx_log:
-            cnx_log['password'] = '<redacted>'
-        display.verbose("etcd3 connection parameters: %s" % cnx_log)
-
-        # connect to etcd3 server
-        etcd = etcd3_client(client_params)
-
-        ret = []
-        # we can pass many keys to lookup
-        for term in terms:
-            if self.get_option('prefix'):
-                try:
-                    for val, meta in etcd.get_prefix(term):
-                        if val and meta:
-                            ret.append({'key': to_native(meta.key), 'value': to_native(val)})
-                except Exception as exp:
-                    display.warning('Caught except during etcd3.get_prefix: %s' % (to_native(exp)))
-            else:
-                try:
-                    val, meta = etcd.get(term)
-                    if val and meta:
-                        ret.append({'key': to_native(meta.key), 'value': to_native(val)})
-                except Exception as exp:
-                    display.warning('Caught except during etcd3.get: %s' % (to_native(exp)))
-        return ret
diff --git a/ansible_collections/community/general/plugins/lookup/flattened.py b/ansible_collections/community/general/plugins/lookup/flattened.py
deleted file mode 100644
index edc546ff..00000000
--- a/ansible_collections/community/general/plugins/lookup/flattened.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2013, Serge van Ginderachter
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
-    name: flattened
-    author: Serge van Ginderachter (!UNKNOWN)
-    short_description: return single list completely flattened
-    description:
-        - Given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
-    options:
-      _terms:
-        description: lists to flatten
-        required: True
-    notes:
-        - Unlike C(items), which only flattens one level, this plugin will continue to flatten until it cannot find lists anymore.
-        - Aka highlander plugin, there can only be one (list).
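For intuition, here is a standalone sketch of the recursive flattening that _do_flatten below implements; the real plugin additionally expands Ansible variables and skips 'None'/'null' items while it recurses:

# Sketch: recursively flatten arbitrarily nested lists.
def deep_flatten(items):
    flat = []
    for item in items:
        if isinstance(item, list):
            flat.extend(deep_flatten(item))  # recurse into nested lists
        else:
            flat.append(item)
    return flat

print(deep_flatten([1, [2, [3, 4]], ['a', ['b']]]))  # [1, 2, 3, 4, 'a', 'b']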
-'''
-
-EXAMPLES = """
-- name: "'unnest' all elements into single list"
-  ansible.builtin.debug:
-    msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}"
-"""
-
-RETURN = """
-  _raw:
-    description:
-      - flattened list
-    type: list
-"""
-from ansible.errors import AnsibleError
-from ansible.module_utils.six import string_types
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.listify import listify_lookup_plugin_terms
-
-
-class LookupModule(LookupBase):
-
-    def _check_list_of_one_list(self, term):
-        # make sure term is not a list of one (list of one..) item
-        # return the final non list item if so
-
-        if isinstance(term, list) and len(term) == 1:
-            term = term[0]
-            if isinstance(term, list):
-                term = self._check_list_of_one_list(term)
-
-        return term
-
-    def _do_flatten(self, terms, variables):
-
-        ret = []
-        for term in terms:
-            term = self._check_list_of_one_list(term)
-
-            if term == 'None' or term == 'null':
-                # ignore undefined items and keep processing the rest
-                continue
-
-            if isinstance(term, string_types):
-                # convert a variable to a list
-                term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
-                # but avoid converting a plain string to a list of one string
-                if term2 != [term]:
-                    term = term2
-
-            if isinstance(term, list):
-                # if it's a list, check recursively for items that are a list
-                term = self._do_flatten(term, variables)
-                ret.extend(term)
-            else:
-                ret.append(term)
-
-        return ret
-
-    def run(self, terms, variables, **kwargs):
-
-        if not isinstance(terms, list):
-            raise AnsibleError("with_flattened expects a list")
-
-        return self._do_flatten(terms, variables)
diff --git a/ansible_collections/community/general/plugins/lookup/hiera.py b/ansible_collections/community/general/plugins/lookup/hiera.py
deleted file mode 100644
index 5b440469..00000000
--- a/ansible_collections/community/general/plugins/lookup/hiera.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2017, Juan Manuel Parrilla
-# (c) 2012-17 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
-    author:
-        - Juan Manuel Parrilla (@jparrill)
-    name: hiera
-    short_description: get info from hiera data
-    requirements:
-        - hiera (command line utility)
-    description:
-        - Retrieves data from a Puppetmaster node using Hiera as ENC.
-    options:
-      _hiera_key:
-        description:
-          - The list of keys to lookup on the Puppetmaster.
-        type: list
-        elements: string
-        required: True
-      _bin_file:
-        description:
-          - Binary file to execute Hiera.
-        default: '/usr/bin/hiera'
-        env:
-          - name: ANSIBLE_HIERA_BIN
-      _hierarchy_file:
-        description:
-          - File that describes the hierarchy of Hiera.
-        default: '/etc/hiera.yaml'
-        env:
-          - name: ANSIBLE_HIERA_CFG
-# FIXME: incomplete options .. _terms? environment/fqdn?
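The Hiera class below simply shells out to the hiera binary with the configured hierarchy file. A minimal standalone sketch of that call path; the paths and key are illustrative and a local hiera install is assumed:

# Sketch: query the hiera CLI the way the plugin's Hiera.get() does.
import subprocess

def hiera_get(key, bin_file='/usr/bin/hiera', config='/etc/hiera.yaml'):
    # equivalent to running: /usr/bin/hiera -c /etc/hiera.yaml <key>
    out = subprocess.check_output([bin_file, '-c', config, key])
    return out.decode('utf-8').strip()

# print(hiera_get('foo'))  # uncomment on a host with hiera configured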
-''' - -EXAMPLES = """ -# All this examples depends on hiera.yml that describes the hierarchy - -- name: "a value from Hiera 'DB'" - ansible.builtin.debug: - msg: "{{ lookup('community.general.hiera', 'foo') }}" - -- name: "a value from a Hiera 'DB' on other environment" - ansible.builtin.debug: - msg: "{{ lookup('community.general.hiera', 'foo environment=production') }}" - -- name: "a value from a Hiera 'DB' for a concrete node" - ansible.builtin.debug: - msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}" -""" - -RETURN = """ - _raw: - description: - - a value associated with input key - type: list - elements: str -""" - -import os - -from ansible.plugins.lookup import LookupBase -from ansible.utils.cmd_functions import run_cmd -from ansible.module_utils.common.text.converters import to_text - -ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml') -ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera') - - -class Hiera(object): - def get(self, hiera_key): - pargs = [ANSIBLE_HIERA_BIN] - pargs.extend(['-c', ANSIBLE_HIERA_CFG]) - - pargs.extend(hiera_key) - - rc, output, err = run_cmd("{0} -c {1} {2}".format( - ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0])) - - return to_text(output.strip()) - - -class LookupModule(LookupBase): - def run(self, terms, variables=''): - hiera = Hiera() - ret = [hiera.get(terms)] - return ret diff --git a/ansible_collections/community/general/plugins/lookup/keyring.py b/ansible_collections/community/general/plugins/lookup/keyring.py deleted file mode 100644 index 73f9c5f4..00000000 --- a/ansible_collections/community/general/plugins/lookup/keyring.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Samuel Boucher -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: keyring - author: - - Samuel Boucher (!UNKNOWN) - requirements: - - keyring (python library) - short_description: grab secrets from the OS keyring - description: - - Allows you to access data stored in the OS provided keyring/keychain. -''' - -EXAMPLES = """ -- name : output secrets to screen (BAD IDEA) - ansible.builtin.debug: - msg: "Password: {{item}}" - with_community.general.keyring: - - 'servicename username' - -- name: access mysql with password from keyring - mysql_db: login_password={{lookup('community.general.keyring','mysql joe')}} login_user=joe -""" - -RETURN = """ - _raw: - description: Secrets stored. 
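# Editor's aside: the python 'keyring' calls this plugin wraps, using
# hypothetical service/user names (requires the 'keyring' package).
import keyring

keyring.set_password('mysql', 'joe', 's3cret')    # store a secret
print(keyring.get_password('mysql', 'joe'))       # -> 's3cret'; None if absent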
- type: list - elements: str -""" - -HAS_KEYRING = True - -from ansible.errors import AnsibleError -from ansible.utils.display import Display - -try: - import keyring -except ImportError: - HAS_KEYRING = False - -from ansible.plugins.lookup import LookupBase - -display = Display() - - -class LookupModule(LookupBase): - - def run(self, terms, **kwargs): - if not HAS_KEYRING: - raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'") - - display.vvvv(u"keyring: %s" % keyring.get_keyring()) - ret = [] - for term in terms: - (servicename, username) = (term.split()[0], term.split()[1]) - display.vvvv(u"username: %s, servicename: %s " % (username, servicename)) - password = keyring.get_password(servicename, username) - if password is None: - raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username)) - ret.append(password.rstrip()) - return ret diff --git a/ansible_collections/community/general/plugins/lookup/lastpass.py b/ansible_collections/community/general/plugins/lookup/lastpass.py deleted file mode 100644 index 920d3317..00000000 --- a/ansible_collections/community/general/plugins/lookup/lastpass.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Andrew Zenk -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: lastpass - author: - - Andrew Zenk (!UNKNOWN) - requirements: - - lpass (command line utility) - - must have already logged into lastpass - short_description: fetch data from lastpass - description: - - use the lpass command line utility to fetch specific fields from lastpass - options: - _terms: - description: key from which you want to retrieve the field - required: True - field: - description: field to return from lastpass - default: 'password' -''' - -EXAMPLES = """ -- name: get 'custom_field' from lastpass entry 'entry-name' - ansible.builtin.debug: - msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}" -""" - -RETURN = """ - _raw: - description: secrets stored - type: list - elements: str -""" - -from subprocess import Popen, PIPE - -from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_bytes, to_text -from ansible.plugins.lookup import LookupBase - - -class LPassException(AnsibleError): - pass - - -class LPass(object): - - def __init__(self, path='lpass'): - self._cli_path = path - - @property - def cli_path(self): - return self._cli_path - - @property - def logged_in(self): - out, err = self._run(self._build_args("logout"), stdin="n\n", expected_rc=1) - return err.startswith("Are you sure you would like to log out?") - - def _run(self, args, stdin=None, expected_rc=0): - p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) - out, err = p.communicate(to_bytes(stdin)) - rc = p.wait() - if rc != expected_rc: - raise LPassException(err) - return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') - - def _build_args(self, command, args=None): - if args is None: - args = [] - args = [command] + args - args += ["--color=never"] - return args - - def get_field(self, key, field): - if field in ['username', 'password', 'url', 'notes', 'id', 'name']: - out, err = self._run(self._build_args("show", ["--{0}".format(field), key])) - else: - out, err = self._run(self._build_args("show", 
["--field={0}".format(field), key])) - return out.strip() - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - lp = LPass() - - if not lp.logged_in: - raise AnsibleError("Not logged into lastpass: please run 'lpass login' first") - - field = kwargs.get('field', 'password') - values = [] - for term in terms: - values.append(lp.get_field(term, field)) - return values diff --git a/ansible_collections/community/general/plugins/lookup/onepassword.py b/ansible_collections/community/general/plugins/lookup/onepassword.py deleted file mode 100644 index 9f97a90e..00000000 --- a/ansible_collections/community/general/plugins/lookup/onepassword.py +++ /dev/null @@ -1,284 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Scott Buchanan -# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point) -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: onepassword - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) - short_description: fetch field values from 1Password - description: - - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password. - options: - _terms: - description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve. - required: True - field: - description: field to return from each matching item (case-insensitive). - default: 'password' - master_password: - description: The password used to unlock the specified vault. - aliases: ['vault_password'] - section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. - domain: - description: Domain of 1Password. Default is U(1password.com). - version_added: 3.2.0 - default: '1password.com' - type: str - subdomain: - description: The 1Password subdomain to authenticate against. - username: - description: The username used to sign in. - secret_key: - description: The secret key used when performing an initial sign in. - vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. - notes: - - This lookup will use an existing 1Password session if one exists. If not, and you have already - performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required. - You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). - - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. 
- - Tested with C(op) version 0.5.3 -''' - -EXAMPLES = """ -# These examples only work when already signed in to 1Password -- name: Retrieve password for KITT when already signed in to 1Password - ansible.builtin.debug: - var: lookup('community.general.onepassword', 'KITT') - -- name: Retrieve password for Wintermute when already signed in to 1Password - ansible.builtin.debug: - var: lookup('community.general.onepassword', 'Tessier-Ashpool', section='Wintermute') - -- name: Retrieve username for HAL when already signed in to 1Password - ansible.builtin.debug: - var: lookup('community.general.onepassword', 'HAL 9000', field='username', vault='Discovery') - -- name: Retrieve password for HAL when not signed in to 1Password - ansible.builtin.debug: - var: lookup('community.general.onepassword' - 'HAL 9000' - subdomain='Discovery' - master_password=vault_master_password) - -- name: Retrieve password for HAL when never signed in to 1Password - ansible.builtin.debug: - var: lookup('community.general.onepassword' - 'HAL 9000' - subdomain='Discovery' - master_password=vault_master_password - username='tweety@acme.com' - secret_key=vault_secret_key) -""" - -RETURN = """ - _raw: - description: field data requested - type: list - elements: str -""" - -import errno -import json -import os - -from subprocess import Popen, PIPE - -from ansible.plugins.lookup import LookupBase -from ansible.errors import AnsibleLookupError -from ansible.module_utils.common.text.converters import to_bytes, to_text - - -class OnePass(object): - - def __init__(self, path='op'): - self.cli_path = path - self.config_file_path = os.path.expanduser('~/.op/config') - self.logged_in = False - self.token = None - self.subdomain = None - self.domain = None - self.username = None - self.secret_key = None - self.master_password = None - - def get_token(self): - # If the config file exists, assume an initial signin has taken place and try basic sign in - if os.path.isfile(self.config_file_path): - - if not self.master_password: - raise AnsibleLookupError('Unable to sign in to 1Password. master_password is required.') - - try: - args = ['signin', '--output=raw'] - - if self.subdomain: - args = ['signin', self.subdomain, '--output=raw'] - - rc, out, err = self._run(args, command_input=to_bytes(self.master_password)) - self.token = out.strip() - - except AnsibleLookupError: - self.full_login() - - else: - # Attempt a full sign in since there appears to be no existing sign in - self.full_login() - - def assert_logged_in(self): - try: - rc, out, err = self._run(['get', 'account'], ignore_errors=True) - if rc == 0: - self.logged_in = True - if not self.logged_in: - self.get_token() - except OSError as e: - if e.errno == errno.ENOENT: - raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path) - raise e - - def get_raw(self, item_id, vault=None): - args = ["get", "item", item_id] - if vault is not None: - args += ['--vault={0}'.format(vault)] - if not self.logged_in: - args += [to_bytes('--session=') + self.token] - rc, output, dummy = self._run(args) - return output - - def get_field(self, item_id, field, section=None, vault=None): - output = self.get_raw(item_id, vault) - return self._parse_field(output, field, section) if output != '' else '' - - def full_login(self): - if None in [self.subdomain, self.username, self.secret_key, self.master_password]: - raise AnsibleLookupError('Unable to perform initial sign in to 1Password. 
' - 'subdomain, username, secret_key, and master_password are required to perform initial sign in.') - - args = [ - 'signin', - '{0}.{1}'.format(self.subdomain, self.domain), - to_bytes(self.username), - to_bytes(self.secret_key), - '--output=raw', - ] - - rc, out, err = self._run(args, command_input=to_bytes(self.master_password)) - self.token = out.strip() - - def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): - command = [self.cli_path] + args - p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) - out, err = p.communicate(input=command_input) - rc = p.wait() - if not ignore_errors and rc != expected_rc: - raise AnsibleLookupError(to_text(err)) - return rc, out, err - - def _parse_field(self, data_json, field_name, section_title=None): - """ - Retrieves the desired field from the `op` response payload - - When the item is a `password` type, the password is a key within the `details` key: - - $ op get item 'test item' | jq - { - [...] - "templateUuid": "005", - "details": { - "notesPlain": "", - "password": "foobar", - "passwordHistory": [], - "sections": [ - { - "name": "linked items", - "title": "Related Items" - } - ] - }, - [...] - } - - However, when the item is a `login` type, the password is within a fields array: - - $ op get item 'test item' | jq - { - [...] - "details": { - "fields": [ - { - "designation": "username", - "name": "username", - "type": "T", - "value": "foo" - }, - { - "designation": "password", - "name": "password", - "type": "P", - "value": "bar" - } - ], - [...] - }, - [...] - """ - data = json.loads(data_json) - if section_title is None: - # https://github.com/ansible-collections/community.general/pull/1610: - # check the details dictionary for `field_name` and return it immediately if it exists - # when the entry is a "password" instead of a "login" item, the password field is a key - # in the `details` dictionary: - if field_name in data['details']: - return data['details'][field_name] - - # when the field is not found above, iterate through the fields list in the object details - for field_data in data['details'].get('fields', []): - if field_data.get('name', '').lower() == field_name.lower(): - return field_data.get('value', '') - for section_data in data['details'].get('sections', []): - if section_title is not None and section_title.lower() != section_data['title'].lower(): - continue - for field_data in section_data.get('fields', []): - if field_data.get('t', '').lower() == field_name.lower(): - return field_data.get('v', '') - return '' - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - op = OnePass() - - field = kwargs.get('field', 'password') - section = kwargs.get('section') - vault = kwargs.get('vault') - op.subdomain = kwargs.get('subdomain') - op.domain = kwargs.get('domain', '1password.com') - op.username = kwargs.get('username') - op.secret_key = kwargs.get('secret_key') - op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) - - op.assert_logged_in() - - values = [] - for term in terms: - values.append(op.get_field(term, field, section, vault)) - return values diff --git a/ansible_collections/community/general/plugins/lookup/onepassword_raw.py b/ansible_collections/community/general/plugins/lookup/onepassword_raw.py deleted file mode 100644 index d1958f78..00000000 --- a/ansible_collections/community/general/plugins/lookup/onepassword_raw.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Scott Buchanan -# 
Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point) -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: onepassword_raw - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) - short_description: fetch an entire item from 1Password - description: - - C(onepassword_raw) wraps C(op) command line utility to fetch an entire item from 1Password - options: - _terms: - description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve. - required: True - master_password: - description: The password used to unlock the specified vault. - aliases: ['vault_password'] - section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. - subdomain: - description: The 1Password subdomain to authenticate against. - username: - description: The username used to sign in. - secret_key: - description: The secret key used when performing an initial sign in. - vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. - notes: - - This lookup will use an existing 1Password session if one exists. If not, and you have already - performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required. - You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). - - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. 
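# Editor's sketch: extracting one field from `op get item` JSON, following
# the "login"-type layout documented in _parse_field above (sample data,
# not a real 1Password response).
import json

sample = '{"details": {"fields": [{"designation": "password", "name": "password", "value": "bar"}]}}'

def get_field(raw, name):
    for f in json.loads(raw)['details'].get('fields', []):
        if f.get('name', '').lower() == name.lower():
            return f.get('value', '')
    return ''

assert get_field(sample, 'password') == 'bar'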
- - Tested with C(op) version 0.5.3 -''' - -EXAMPLES = """ -- name: Retrieve all data about Wintermute - ansible.builtin.debug: - var: lookup('community.general.onepassword_raw', 'Wintermute') - -- name: Retrieve all data about Wintermute when not signed in to 1Password - ansible.builtin.debug: - var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl') -""" - -RETURN = """ - _raw: - description: field data requested - type: list - elements: dict -""" - -import json - -from ansible_collections.community.general.plugins.lookup.onepassword import OnePass -from ansible.plugins.lookup import LookupBase - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - op = OnePass() - - vault = kwargs.get('vault') - op.subdomain = kwargs.get('subdomain') - op.username = kwargs.get('username') - op.secret_key = kwargs.get('secret_key') - op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) - - op.assert_logged_in() - - values = [] - for term in terms: - data = json.loads(op.get_raw(term, vault)) - values.append(data) - return values diff --git a/ansible_collections/community/general/plugins/lookup/passwordstore.py b/ansible_collections/community/general/plugins/lookup/passwordstore.py deleted file mode 100644 index a221e496..00000000 --- a/ansible_collections/community/general/plugins/lookup/passwordstore.py +++ /dev/null @@ -1,427 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Patrick Deelman -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' - name: passwordstore - author: - - Patrick Deelman (!UNKNOWN) - short_description: manage passwords with passwordstore.org's pass utility - description: - - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. - It also retrieves YAML style keys stored as multilines in the passwordfile. - - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to - C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using I(lock=readwrite) instead. - options: - _terms: - description: query key. - required: True - passwordstore: - description: location of the password store. - default: '~/.password-store' - directory: - description: The directory of the password store. - env: - - name: PASSWORD_STORE_DIR - create: - description: Create the password if it does not already exist. Takes precedence over C(missing). - type: bool - default: false - overwrite: - description: Overwrite the password if it does already exist. - type: bool - default: 'no' - umask: - description: - - Sets the umask for the created .gpg files. The first octed must be greater than 3 (user readable). - - Note pass' default value is C('077'). - env: - - name: PASSWORD_STORE_UMASK - version_added: 1.3.0 - returnall: - description: Return all the content of the password, not only the first line. - type: bool - default: 'no' - subkey: - description: Return a specific subkey of the password. When set to C(password), always returns the first line. - default: password - userpass: - description: Specify a password to save, instead of a generated one. - length: - description: The length of the generated password. - type: integer - default: 16 - backup: - description: Used with C(overwrite=yes). Backup the previous password in a subkey. 
- type: bool - default: 'no' - nosymbols: - description: use alphanumeric characters. - type: bool - default: 'no' - missing: - description: - - List of preference about what to do if the password file is missing. - - If I(create=true), the value for this option is ignored and assumed to be C(create). - - If set to C(error), the lookup will error out if the passname does not exist. - - If set to C(create), the passname will be created with the provided length I(length) if it does not exist. - - If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist. - When using C(lookup) and not C(query), this will be translated to an empty string. - version_added: 3.1.0 - type: str - default: error - choices: - - error - - warn - - empty - - create - lock: - description: - - How to synchronize operations. - - The default of C(write) only synchronizes write operations. - - C(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel. - - C(none) does not do any synchronization. - ini: - - section: passwordstore_lookup - key: lock - type: str - default: write - choices: - - readwrite - - write - - none - version_added: 4.5.0 - locktimeout: - description: - - Lock timeout applied when I(lock) is not C(none). - - Time with a unit suffix, C(s), C(m), C(h) for seconds, minutes, and hours, respectively. For example, C(900s) equals C(15m). - - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details. - ini: - - section: passwordstore_lookup - key: locktimeout - type: str - default: 15m - version_added: 4.5.0 -''' -EXAMPLES = """ -ansible.cfg: | - [passwordstore_lookup] - lock=readwrite - locktimeout=45s - -playbook.yml: | - --- - - # Debug is used for examples, BAD IDEA to show passwords on screen - - name: Basic lookup. Fails if example/test does not exist - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test')}}" - - - name: Basic lookup. Warns if example/test does not exist and returns empty string - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test missing=warn')}}" - - - name: Create pass with random 16 character password. If password exists just give the password - ansible.builtin.debug: - var: mypassword - vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}" - - - name: Create pass with random 16 character password. If password exists just give the password - ansible.builtin.debug: - var: mypassword - vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=create')}}" - - - name: Prints 'abc' if example/test does not exist, just give the password otherwise - ansible.builtin.debug: - var: mypassword - vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=empty') | default('abc', true) }}" - - - name: Different size password - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}" - - - name: Create password and overwrite the password if it exists. 
As a bonus, this module includes the old password inside the pass file - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}" - - - name: Create an alphanumeric password - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}" - - - name: Return the value for user in the KV pair user, username - ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}" - - - name: Return the entire password file content - ansible.builtin.set_fact: - passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}" -""" - -RETURN = """ -_raw: - description: - - a password - type: list - elements: str -""" - -from contextlib import contextmanager -import os -import re -import subprocess -import time -import yaml - -from ansible.errors import AnsibleError, AnsibleAssertionError -from ansible.module_utils.common.file import FileLock -from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.parsing.convert_bool import boolean -from ansible.utils.display import Display -from ansible.utils.encrypt import random_password -from ansible.plugins.lookup import LookupBase -from ansible import constants as C - -display = Display() - - -# backhacked check_output with input for python 2.7 -# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output -# note: contains special logic for calling 'pass', so not a drop-in replacement for check_output -def check_output2(*popenargs, **kwargs): - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - if 'stderr' in kwargs: - raise ValueError('stderr argument not allowed, it will be overridden.') - if 'input' in kwargs: - if 'stdin' in kwargs: - raise ValueError('stdin and input arguments may not both be used.') - b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict') - del kwargs['input'] - kwargs['stdin'] = subprocess.PIPE - else: - b_inputdata = None - process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) - try: - b_out, b_err = process.communicate(b_inputdata) - except Exception: - process.kill() - process.wait() - raise - retcode = process.poll() - if retcode == 0 and (b'encryption failed: Unusable public key' in b_out or - b'encryption failed: Unusable public key' in b_err): - retcode = 78 # os.EX_CONFIG - if retcode != 0: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise subprocess.CalledProcessError( - retcode, - cmd, - to_native(b_out + b_err, errors='surrogate_or_strict') - ) - return b_out - - -class LookupModule(LookupBase): - def parse_params(self, term): - # I went with the "traditional" param followed with space separated KV pairs. - # Waiting for final implementation of lookup parameter parsing. 
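# Editor's sketch of that "traditional" term format: the first token is the
# pass-name, the rest are space-separated key=value pairs.
def parse_term(term):
    tokens = term.split()
    passname, params = tokens[0], {}
    for tok in tokens[1:]:
        key, _, value = tok.partition('=')
        params[key] = value
    return passname, params

assert parse_term('example/test create=true length=42') == \
    ('example/test', {'create': 'true', 'length': '42'})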
- # See: https://github.com/ansible/ansible/issues/12255 - params = term.split() - if len(params) > 0: - # the first param is the pass-name - self.passname = params[0] - # next parse the optional parameters in keyvalue pairs - try: - for param in params[1:]: - name, value = param.split('=', 1) - if name not in self.paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) - self.paramvals[name] = value - except (ValueError, AssertionError) as e: - raise AnsibleError(e) - # check and convert values - try: - for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']: - if not isinstance(self.paramvals[key], bool): - self.paramvals[key] = boolean(self.paramvals[key]) - except (ValueError, AssertionError) as e: - raise AnsibleError(e) - if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']: - raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing'])) - if not isinstance(self.paramvals['length'], int): - if self.paramvals['length'].isdigit(): - self.paramvals['length'] = int(self.paramvals['length']) - else: - raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length'])) - - if self.paramvals['create']: - self.paramvals['missing'] = 'create' - - # Collect pass environment variables from the plugin's parameters. - self.env = os.environ.copy() - self.env['LANGUAGE'] = 'C' # make sure to get errors in English as required by check_output2 - - # Set PASSWORD_STORE_DIR - if os.path.isdir(self.paramvals['directory']): - self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory'] - else: - raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory'])) - - # Set PASSWORD_STORE_UMASK if umask is set - if 'umask' in self.paramvals: - if len(self.paramvals['umask']) != 3: - raise AnsibleError('Passwordstore umask must have a length of 3.') - elif int(self.paramvals['umask'][0]) > 3: - raise AnsibleError('Passwordstore umask not allowed (password not user readable).') - else: - self.env['PASSWORD_STORE_UMASK'] = self.paramvals['umask'] - - def check_pass(self): - try: - self.passoutput = to_text( - check_output2(["pass", "show", self.passname], env=self.env), - errors='surrogate_or_strict' - ).splitlines() - self.password = self.passoutput[0] - self.passdict = {} - try: - values = yaml.safe_load('\n'.join(self.passoutput[1:])) - for key, item in values.items(): - self.passdict[key] = item - except (yaml.YAMLError, AttributeError): - for line in self.passoutput[1:]: - if ':' in line: - name, value = line.split(':', 1) - self.passdict[name.strip()] = value.strip() - if os.path.isfile(os.path.join(self.paramvals['directory'], self.passname + ".gpg")): - # Only accept password as found, if there a .gpg file for it (might be a tree node otherwise) - return True - except (subprocess.CalledProcessError) as e: - # 'not in password store' is the expected error if a password wasn't found - if 'not in the password store' not in e.output: - raise AnsibleError(e) - - if self.paramvals['missing'] == 'error': - raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname)) - elif self.paramvals['missing'] == 'warn': - display.warning('passwordstore: passname {0} not found'.format(self.passname)) - - return False - - def get_newpass(self): - if self.paramvals['nosymbols']: - chars = C.DEFAULT_PASSWORD_CHARS[:62] - else: - chars = C.DEFAULT_PASSWORD_CHARS - - if self.paramvals['userpass']: - newpass = self.paramvals['userpass'] - 
else: - newpass = random_password(length=self.paramvals['length'], chars=chars) - return newpass - - def update_password(self): - # generate new password, insert old lines from current result and return new password - newpass = self.get_newpass() - datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + '\n' - if self.passoutput[1:]: - msg += '\n'.join(self.passoutput[1:]) + '\n' - if self.paramvals['backup']: - msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) - try: - check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env) - except (subprocess.CalledProcessError) as e: - raise AnsibleError(e) - return newpass - - def generate_password(self): - # generate new file and insert lookup_pass: Generated by Ansible on {date} - # use pwgen to generate the password and insert values with pass -m - newpass = self.get_newpass() - datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime) - try: - check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env) - except (subprocess.CalledProcessError) as e: - raise AnsibleError(e) - return newpass - - def get_passresult(self): - if self.paramvals['returnall']: - return os.linesep.join(self.passoutput) - if self.paramvals['subkey'] == 'password': - return self.password - else: - if self.paramvals['subkey'] in self.passdict: - return self.passdict[self.paramvals['subkey']] - else: - return None - - @contextmanager - def opt_lock(self, type): - if self.get_option('lock') == type: - tmpdir = os.environ.get('TMPDIR', '/tmp') - lockfile = os.path.join(tmpdir, '.passwordstore.lock') - with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout): - self.locked = type - yield - self.locked = None - else: - yield - - def setup(self, variables): - self.locked = None - timeout = self.get_option('locktimeout') - if not re.match('^[0-9]+[smh]$', timeout): - raise AnsibleError("{0} is not a correct value for locktimeout".format(timeout)) - unit_to_seconds = {"s": 1, "m": 60, "h": 3600} - self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]] - self.paramvals = { - 'subkey': 'password', - 'directory': variables.get('passwordstore', os.environ.get( - 'PASSWORD_STORE_DIR', - os.path.expanduser('~/.password-store'))), - 'create': False, - 'returnall': False, - 'overwrite': False, - 'nosymbols': False, - 'userpass': '', - 'length': 16, - 'backup': False, - 'missing': 'error', - } - - def run(self, terms, variables, **kwargs): - self.setup(variables) - result = [] - - for term in terms: - self.parse_params(term) # parse the input into paramvals - with self.opt_lock('readwrite'): - if self.check_pass(): # password exists - if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password': - with self.opt_lock('write'): - result.append(self.update_password()) - else: - result.append(self.get_passresult()) - else: # password does not exist - if self.paramvals['missing'] == 'create': - with self.opt_lock('write'): - if self.locked == 'write' and self.check_pass(): # lookup password again if under write lock - result.append(self.get_passresult()) - else: - result.append(self.generate_password()) - else: - result.append(None) - - return result diff --git a/ansible_collections/community/general/plugins/lookup/redis.py b/ansible_collections/community/general/plugins/lookup/redis.py deleted file mode 100644 index 8de7e04c..00000000 --- 
a/ansible_collections/community/general/plugins/lookup/redis.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2012, Jan-Piet Mens -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: redis - author: - - Jan-Piet Mens (@jpmens) - - Ansible Core Team - short_description: fetch data from Redis - description: - - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it - requirements: - - redis (python library https://github.com/andymccurdy/redis-py/) - options: - _terms: - description: list of keys to query - host: - description: location of Redis host - default: '127.0.0.1' - env: - - name: ANSIBLE_REDIS_HOST - ini: - - section: lookup_redis - key: host - port: - description: port on which Redis is listening on - default: 6379 - type: int - env: - - name: ANSIBLE_REDIS_PORT - ini: - - section: lookup_redis - key: port - socket: - description: path to socket on which to query Redis, this option overrides host and port options when set. - type: path - env: - - name: ANSIBLE_REDIS_SOCKET - ini: - - section: lookup_redis - key: socket -''' - -EXAMPLES = """ -- name: query redis for somekey (default or configured settings used) - ansible.builtin.debug: - msg: "{{ lookup('community.general.redis', 'somekey') }}" - -- name: query redis for list of keys and non-default host and port - ansible.builtin.debug: - msg: "{{ lookup('community.general.redis', item, host='myredis.internal.com', port=2121) }}" - loop: '{{list_of_redis_keys}}' - -- name: use list directly - ansible.builtin.debug: - msg: "{{ lookup('community.general.redis', 'key1', 'key2', 'key3') }}" - -- name: use list directly with a socket - ansible.builtin.debug: - msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}" - -""" - -RETURN = """ -_raw: - description: value(s) stored in Redis - type: list - elements: str -""" - -import os - -HAVE_REDIS = False -try: - import redis - HAVE_REDIS = True -except ImportError: - pass - -from ansible.module_utils.common.text.converters import to_text -from ansible.errors import AnsibleError -from ansible.plugins.lookup import LookupBase - - -class LookupModule(LookupBase): - - def run(self, terms, variables, **kwargs): - - if not HAVE_REDIS: - raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed") - - # get options - self.set_options(direct=kwargs) - - # setup connection - host = self.get_option('host') - port = self.get_option('port') - socket = self.get_option('socket') - if socket is None: - conn = redis.Redis(host=host, port=port) - else: - conn = redis.Redis(unix_socket_path=socket) - - ret = [] - for term in terms: - try: - res = conn.get(term) - if res is None: - res = "" - ret.append(to_text(res)) - except Exception as e: - # connection failed or key not found - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) - return ret diff --git a/ansible_collections/community/general/plugins/lookup/tss.py b/ansible_collections/community/general/plugins/lookup/tss.py deleted file mode 100644 index 880e6e38..00000000 --- a/ansible_collections/community/general/plugins/lookup/tss.py +++ /dev/null @@ -1,288 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Adam Migus -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 
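# Editor's aside on the redis lookup above: the redis-py calls it wraps,
# with the default host/port (requires the 'redis' package; the key name
# is illustrative).
import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
# conn = redis.Redis(unix_socket_path='/var/tmp/redis.sock')  # socket variant
value = conn.get('somekey')                  # None when the key does not exist
print('' if value is None else value.decode())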
-from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r""" -name: tss -author: Adam Migus (@amigus) -short_description: Get secrets from Thycotic Secret Server -version_added: 1.0.0 -description: - - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret - Server using token authentication with I(username) and I(password) on - the REST API at I(base_url). - - When using self-signed certificates the environment variable - C(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates - (in C(.pem) format). - - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt'). -requirements: - - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ -options: - _terms: - description: The integer ID of the secret. - required: true - type: int - base_url: - description: The base URL of the server, e.g. C(https://localhost/SecretServer). - env: - - name: TSS_BASE_URL - ini: - - section: tss_lookup - key: base_url - required: true - username: - description: The username with which to request the OAuth2 Access Grant. - env: - - name: TSS_USERNAME - ini: - - section: tss_lookup - key: username - password: - description: - - The password associated with the supplied username. - - Required when I(token) is not provided. - env: - - name: TSS_PASSWORD - ini: - - section: tss_lookup - key: password - domain: - default: "" - description: - - The domain with which to request the OAuth2 Access Grant. - - Optional when I(token) is not provided. - - Requires C(python-tss-sdk) version 1.0.0 or greater. - env: - - name: TSS_DOMAIN - ini: - - section: tss_lookup - key: domain - required: false - version_added: 3.6.0 - token: - description: - - Existing token for Thycotic authorizer. - - If provided, I(username) and I(password) are not needed. - - Requires C(python-tss-sdk) version 1.0.0 or greater. - env: - - name: TSS_TOKEN - ini: - - section: tss_lookup - key: token - version_added: 3.7.0 - api_path_uri: - default: /api/v1 - description: The path to append to the base URL to form a valid REST - API request. - env: - - name: TSS_API_PATH_URI - required: false - token_path_uri: - default: /oauth2/token - description: The path to append to the base URL to form a valid OAuth2 - Access Grant request. - env: - - name: TSS_TOKEN_PATH_URI - required: false -""" - -RETURN = r""" -_list: - description: - - The JSON responses to C(GET /secrets/{id}). - - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get). 
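# Editor's aside: what the items2dict filter in the examples below does to
# such a GET /secrets/{id} response -- map each item's slug to its
# itemValue (sample data, not a real Secret Server payload).
secret = {'items': [{'slug': 'username', 'itemValue': 'user.name'},
                    {'slug': 'password', 'itemValue': 'p4ss'}]}

by_slug = {item['slug']: item['itemValue'] for item in secret['items']}
assert by_slug['password'] == 'p4ss'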
- type: list - elements: dict -""" - -EXAMPLES = r""" -- hosts: localhost - vars: - secret: >- - {{ - lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - username='user.name', - password='password' - ) - }} - tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} - -- hosts: localhost - vars: - secret: >- - {{ - lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - username='user.name', - password='password', - domain='domain' - ) - }} - tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} - -- hosts: localhost - vars: - secret_password: >- - {{ - ((lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - token='thycotic_access_token', - ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] - }} - tasks: - - ansible.builtin.debug: - msg: the password is {{ secret_password }} -""" - -import abc - -from ansible.errors import AnsibleError, AnsibleOptionsError -from ansible.module_utils import six -from ansible.plugins.lookup import LookupBase -from ansible.utils.display import Display - -try: - from thycotic.secrets.server import SecretServer, SecretServerError - - HAS_TSS_SDK = True -except ImportError: - SecretServer = None - SecretServerError = None - HAS_TSS_SDK = False - -try: - from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer - - HAS_TSS_AUTHORIZER = True -except ImportError: - PasswordGrantAuthorizer = None - DomainPasswordGrantAuthorizer = None - AccessTokenAuthorizer = None - HAS_TSS_AUTHORIZER = False - - -display = Display() - - -@six.add_metaclass(abc.ABCMeta) -class TSSClient(object): - def __init__(self): - self._client = None - - @staticmethod - def from_params(**server_parameters): - if HAS_TSS_AUTHORIZER: - return TSSClientV1(**server_parameters) - else: - return TSSClientV0(**server_parameters) - - def get_secret(self, term): - display.debug("tss_lookup term: %s" % term) - - secret_id = self._term_to_secret_id(term) - display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id) - - return self._client.get_secret_json(secret_id) - - @staticmethod - def _term_to_secret_id(term): - try: - return int(term) - except ValueError: - raise AnsibleOptionsError("Secret ID must be an integer") - - -class TSSClientV0(TSSClient): - def __init__(self, **server_parameters): - super(TSSClientV0, self).__init__() - - if server_parameters.get("domain"): - raise AnsibleError("The 'domain' option requires 'python-tss-sdk' version 1.0.0 or greater") - - self._client = SecretServer( - server_parameters["base_url"], - server_parameters["username"], - server_parameters["password"], - server_parameters["api_path_uri"], - server_parameters["token_path_uri"], - ) - - -class TSSClientV1(TSSClient): - def __init__(self, **server_parameters): - super(TSSClientV1, self).__init__() - - authorizer = self._get_authorizer(**server_parameters) - self._client = SecretServer( - server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] - ) - - @staticmethod - def _get_authorizer(**server_parameters): - if server_parameters.get("token"): - return AccessTokenAuthorizer( - server_parameters["token"], - ) - - if 
server_parameters.get("domain"): - return DomainPasswordGrantAuthorizer( - server_parameters["base_url"], - server_parameters["username"], - server_parameters["domain"], - server_parameters["password"], - server_parameters["token_path_uri"], - ) - - return PasswordGrantAuthorizer( - server_parameters["base_url"], - server_parameters["username"], - server_parameters["password"], - server_parameters["token_path_uri"], - ) - - -class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - if not HAS_TSS_SDK: - raise AnsibleError("python-tss-sdk must be installed to use this plugin") - - self.set_options(var_options=variables, direct=kwargs) - - tss = TSSClient.from_params( - base_url=self.get_option("base_url"), - username=self.get_option("username"), - password=self.get_option("password"), - domain=self.get_option("domain"), - token=self.get_option("token"), - api_path_uri=self.get_option("api_path_uri"), - token_path_uri=self.get_option("token_path_uri"), - ) - - try: - return [tss.get_secret(term) for term in terms] - except SecretServerError as error: - raise AnsibleError("Secret Server lookup failure: %s" % error.message) diff --git a/ansible_collections/community/general/plugins/module_utils/_mount.py b/ansible_collections/community/general/plugins/module_utils/_mount.py deleted file mode 100644 index 391d4681..00000000 --- a/ansible_collections/community/general/plugins/module_utils/_mount.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is based on -# Lib/posixpath.py of cpython -# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -# -# 1. This LICENSE AGREEMENT is between the Python Software Foundation -# ("PSF"), and the Individual or Organization ("Licensee") accessing and -# otherwise using this software ("Python") in source or binary form and -# its associated documentation. -# -# 2. Subject to the terms and conditions of this License Agreement, PSF hereby -# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -# analyze, test, perform and/or display publicly, prepare derivative works, -# distribute, and otherwise use Python alone or in any derivative version, -# provided, however, that PSF's License Agreement and PSF's notice of copyright, -# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" -# are retained in Python alone or in any derivative version prepared by Licensee. -# -# 3. In the event Licensee prepares a derivative work that is based on -# or incorporates Python or any part thereof, and wants to make -# the derivative work available to others as provided herein, then -# Licensee hereby agrees to include in any such work a brief summary of -# the changes made to Python. -# -# 4. PSF is making Python available to Licensee on an "AS IS" -# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -# INFRINGE ANY THIRD PARTY RIGHTS. -# -# 5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. -# -# 6. This License Agreement will automatically terminate upon a material -# breach of its terms and conditions. -# -# 7. Nothing in this License Agreement shall be deemed to create any -# relationship of agency, partnership, or joint venture between PSF and -# Licensee. This License Agreement does not grant permission to use PSF -# trademarks or trade name in a trademark sense to endorse or promote -# products or services of Licensee, or any third party. -# -# 8. By copying, installing or otherwise using Python, Licensee -# agrees to be bound by the terms and conditions of this License -# Agreement. - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import os - - -def ismount(path): - """Test whether a path is a mount point - This is a copy of the upstream version of ismount(). Originally this was copied here as a workaround - until Python issue 2466 was fixed. Now it is here so this will work on older versions of Python - that may not have the upstream fix. - https://github.com/ansible/ansible-modules-core/issues/2186 - http://bugs.python.org/issue2466 - """ - try: - s1 = os.lstat(path) - except (OSError, ValueError): - # It doesn't exist -- so not a mount point. :-) - return False - else: - # A symlink can never be a mount point - if os.path.stat.S_ISLNK(s1.st_mode): - return False - - if isinstance(path, bytes): - parent = os.path.join(path, b'..') - else: - parent = os.path.join(path, '..') - parent = os.path.realpath(parent) - try: - s2 = os.lstat(parent) - except (OSError, ValueError): - return False - - dev1 = s1.st_dev - dev2 = s2.st_dev - if dev1 != dev2: - return True # path/.. on a different device as path - ino1 = s1.st_ino - ino2 = s2.st_ino - if ino1 == ino2: - return True # path/.. is the same i-node as path - return False diff --git a/ansible_collections/community/general/plugins/module_utils/dimensiondata.py b/ansible_collections/community/general/plugins/module_utils/dimensiondata.py deleted file mode 100644 index bcb02e84..00000000 --- a/ansible_collections/community/general/plugins/module_utils/dimensiondata.py +++ /dev/null @@ -1,330 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Dimension Data -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# Authors: -# - Aimon Bustardo -# - Mark Maglana -# - Adam Friedman -# -# Common functionality to be used by various module components - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import re -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves import configparser -from os.path import expanduser -from uuid import UUID - -LIBCLOUD_IMP_ERR = None -try: - from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus - from libcloud.compute.base import Node, NodeLocation - from libcloud.compute.providers import get_driver - from libcloud.compute.types import Provider - - import libcloud.security - - HAS_LIBCLOUD = True -except ImportError: - LIBCLOUD_IMP_ERR = traceback.format_exc() - HAS_LIBCLOUD = False - -# MCP 2.x version patten for location (datacenter) names. 
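# Editor's aside on ismount() above, reduced to its core test: a path is a
# mount point when its parent sits on a different device, or resolves to
# the very same inode (simplified sketch -- no symlink or error handling).
import os
import os.path

def is_mount(path):
    st = os.lstat(path)
    parent = os.lstat(os.path.realpath(os.path.join(path, '..')))
    return st.st_dev != parent.st_dev or st.st_ino == parent.st_ino

print(is_mount('/'))     # True: '/..' is the same inode as '/'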
-# -# Note that this is not a totally reliable way of determining MCP version. -# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties. -# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version -# by specifying it in the module parameters. -MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*") - - -class DimensionDataModule(object): - """ - The base class containing common functionality used by Dimension Data modules for Ansible. - """ - - def __init__(self, module): - """ - Create a new DimensionDataModule. - - Will fail if Apache libcloud is not present. - - :param module: The underlying Ansible module. - :type module: AnsibleModule - """ - - self.module = module - - if not HAS_LIBCLOUD: - self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR) - - # Credentials are common to all Dimension Data modules. - credentials = self.get_credentials() - self.user_id = credentials['user_id'] - self.key = credentials['key'] - - # Region and location are common to all Dimension Data modules. - region = self.module.params['region'] - self.region = 'dd-{0}'.format(region) - self.location = self.module.params['location'] - - libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs'] - - self.driver = get_driver(Provider.DIMENSIONDATA)( - self.user_id, - self.key, - region=self.region - ) - - # Determine the MCP API version (this depends on the target datacenter). - self.mcp_version = self.get_mcp_version(self.location) - - # Optional "wait-for-completion" arguments - if 'wait' in self.module.params: - self.wait = self.module.params['wait'] - self.wait_time = self.module.params['wait_time'] - self.wait_poll_interval = self.module.params['wait_poll_interval'] - else: - self.wait = False - self.wait_time = 0 - self.wait_poll_interval = 0 - - def get_credentials(self): - """ - Get user_id and key from module configuration, environment, or dotfile. - Order of priority is module, environment, dotfile. - - To set in environment: - - export MCP_USER='myusername' - export MCP_PASSWORD='mypassword' - - To set in dot file place a file at ~/.dimensiondata with - the following contents: - - [dimensiondatacloud] - MCP_USER: myusername - MCP_PASSWORD: mypassword - """ - - if not HAS_LIBCLOUD: - self.module.fail_json(msg='libcloud is required for this module.') - - user_id = None - key = None - - # First, try the module configuration - if 'mcp_user' in self.module.params: - if 'mcp_password' not in self.module.params: - self.module.fail_json( - msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).' - ) - - user_id = self.module.params['mcp_user'] - key = self.module.params['mcp_password'] - - # Fall back to environment - if not user_id or not key: - user_id = os.environ.get('MCP_USER', None) - key = os.environ.get('MCP_PASSWORD', None) - - # Finally, try dotfile (~/.dimensiondata) - if not user_id or not key: - home = expanduser('~') - config = configparser.RawConfigParser() - config.read("%s/.dimensiondata" % home) - - try: - user_id = config.get("dimensiondatacloud", "MCP_USER") - key = config.get("dimensiondatacloud", "MCP_PASSWORD") - except (configparser.NoSectionError, configparser.NoOptionError): - pass - - # One or more credentials not found. Function can't recover from this - # so it has to raise an error instead of fail silently. 
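# Editor's sketch of the ~/.dimensiondata dotfile fallback described in
# get_credentials above (same section and keys; file contents are an example).
from os.path import expanduser
try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2

config = configparser.RawConfigParser()
config.read(expanduser('~') + '/.dimensiondata')
try:
    user_id = config.get('dimensiondatacloud', 'MCP_USER')
    key = config.get('dimensiondatacloud', 'MCP_PASSWORD')
except (configparser.NoSectionError, configparser.NoOptionError):
    user_id = key = None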
- if not user_id: - raise MissingCredentialsError("Dimension Data user id not found") - elif not key: - raise MissingCredentialsError("Dimension Data key not found") - - # Both found, return data - return dict(user_id=user_id, key=key) - - def get_mcp_version(self, location): - """ - Get the MCP version for the specified location. - """ - - location = self.driver.ex_get_location_by_id(location) - if MCP_2_LOCATION_NAME_PATTERN.match(location.name): - return '2.0' - - return '1.0' - - def get_network_domain(self, locator, location): - """ - Retrieve a network domain by its name or Id. - """ - - if is_uuid(locator): - network_domain = self.driver.ex_get_network_domain(locator) - else: - matching_network_domains = [ - network_domain for network_domain in self.driver.ex_list_network_domains(location=location) - if network_domain.name == locator - ] - - if matching_network_domains: - network_domain = matching_network_domains[0] - else: - network_domain = None - - if network_domain: - return network_domain - - raise UnknownNetworkError("Network '%s' could not be found" % locator) - - def get_vlan(self, locator, location, network_domain): - """ - Get a VLAN object by its name or id - """ - if is_uuid(locator): - vlan = self.driver.ex_get_vlan(locator) - else: - matching_vlans = [ - vlan for vlan in self.driver.ex_list_vlans(location, network_domain) - if vlan.name == locator - ] - - if matching_vlans: - vlan = matching_vlans[0] - else: - vlan = None - - if vlan: - return vlan - - raise UnknownVLANError("VLAN '%s' could not be found" % locator) - - @staticmethod - def argument_spec(**additional_argument_spec): - """ - Build an argument specification for a Dimension Data module. - :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any). - :return: A dict containing the argument specification. - """ - - spec = dict( - region=dict(type='str', default='na'), - mcp_user=dict(type='str', required=False), - mcp_password=dict(type='str', required=False, no_log=True), - location=dict(type='str', required=True), - validate_certs=dict(type='bool', required=False, default=True) - ) - - if additional_argument_spec: - spec.update(additional_argument_spec) - - return spec - - @staticmethod - def argument_spec_with_wait(**additional_argument_spec): - """ - Build an argument specification for a Dimension Data module that includes "wait for completion" arguments. - :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any). - :return: A dict containing the argument specification. - """ - - spec = DimensionDataModule.argument_spec( - wait=dict(type='bool', required=False, default=False), - wait_time=dict(type='int', required=False, default=600), - wait_poll_interval=dict(type='int', required=False, default=2) - ) - - if additional_argument_spec: - spec.update(additional_argument_spec) - - return spec - - @staticmethod - def required_together(*additional_required_together): - """ - Get the basic argument specification for Dimension Data modules indicating which arguments are must be specified together. - :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together. - :return: An array containing the argument specifications. 
- """ - - required_together = [ - ['mcp_user', 'mcp_password'] - ] - - if additional_required_together: - required_together.extend(additional_required_together) - - return required_together - - -class LibcloudNotFound(Exception): - """ - Exception raised when Apache libcloud cannot be found. - """ - - pass - - -class MissingCredentialsError(Exception): - """ - Exception raised when credentials for Dimension Data CloudControl cannot be found. - """ - - pass - - -class UnknownNetworkError(Exception): - """ - Exception raised when a network or network domain cannot be found. - """ - - pass - - -class UnknownVLANError(Exception): - """ - Exception raised when a VLAN cannot be found. - """ - - pass - - -def get_dd_regions(): - """ - Get the list of available regions whose vendor is Dimension Data. - """ - - # Get endpoints - all_regions = API_ENDPOINTS.keys() - - # Only Dimension Data endpoints (no prefix) - regions = [region[3:] for region in all_regions if region.startswith('dd-')] - - return regions - - -def is_uuid(u, version=4): - """ - Test if valid v4 UUID - """ - try: - uuid_obj = UUID(u, version=version) - - return str(uuid_obj) == u - except ValueError: - return False diff --git a/ansible_collections/community/general/plugins/module_utils/gitlab.py b/ansible_collections/community/general/plugins/module_utils/gitlab.py deleted file mode 100644 index 21af10b5..00000000 --- a/ansible_collections/community/general/plugins/module_utils/gitlab.py +++ /dev/null @@ -1,104 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Marcus Watkins -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -try: - from urllib import quote_plus # Python 2.X - from urlparse import urljoin -except ImportError: - from urllib.parse import quote_plus, urljoin # Python 3+ - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - import requests - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - - -def auth_argument_spec(spec=None): - arg_spec = (dict( - api_token=dict(type='str', no_log=True), - api_oauth_token=dict(type='str', no_log=True), - api_job_token=dict(type='str', no_log=True), - )) - if spec: - arg_spec.update(spec) - return arg_spec - - -def find_project(gitlab_instance, identifier): - try: - project = gitlab_instance.projects.get(identifier) - except Exception as e: - current_user = gitlab_instance.user - try: - project = gitlab_instance.projects.get(current_user.username + '/' + identifier) - except Exception as e: - return None - - return project - - -def find_group(gitlab_instance, identifier): - try: - project = gitlab_instance.groups.get(identifier) - except Exception as e: - return None - - return project - - -def gitlab_authentication(module): - gitlab_url = module.params['api_url'] - validate_certs = module.params['validate_certs'] - gitlab_user = module.params['api_username'] - gitlab_password = module.params['api_password'] - gitlab_token = module.params['api_token'] - gitlab_oauth_token = module.params['api_oauth_token'] - gitlab_job_token = module.params['api_job_token'] - - if not 
HAS_GITLAB_PACKAGE:
-        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
-    try:
-        # The python-gitlab library removed support for username/password authentication in 1.13.0
-        # Changelog: https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
-        # This condition allows us to still support older versions of the python-gitlab library
-        if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
-            gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
-                                            private_token=gitlab_token, api_version=4)
-        else:
-            # We can create an oauth_token using a username and password
-            # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
-            if gitlab_user:
-                data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
-                resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs)
-                resp_data = resp.json()
-                gitlab_oauth_token = resp_data["access_token"]
-
-            gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token,
-                                            oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
-
-        gitlab_instance.auth()
-    except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
-        module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
-    except (gitlab.exceptions.GitlabHttpError) as e:
-        module.fail_json(msg="Failed to connect to GitLab server: %s. \
-            GitLab removed the Session API in version 10.2, when private tokens were removed from user API endpoints." % to_native(e))
-
-    return gitlab_instance
diff --git a/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py b/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
deleted file mode 100644
index a856901b..00000000
--- a/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
+++ /dev/null
@@ -1,1740 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017, Eike Frost
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-#    * Redistributions of source code must retain the above copyright
-#      notice, this list of conditions and the following disclaimer.
-#    * Redistributions in binary form must reproduce the above copyright notice,
-#      this list of conditions and the following disclaimer in the documentation
-#      and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import json -import traceback - -from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves.urllib.parse import urlencode, quote -from ansible.module_utils.six.moves.urllib.error import HTTPError -from ansible.module_utils.common.text.converters import to_native, to_text - -URL_REALM_INFO = "{url}/realms/{realm}" -URL_REALMS = "{url}/admin/realms" -URL_REALM = "{url}/admin/realms/{realm}" - -URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" -URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" -URL_CLIENTS = "{url}/admin/realms/{realm}/clients" - -URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles" -URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}" -URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites" - -URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles" -URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}" -URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites" - -URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}" -URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" -URL_GROUPS = "{url}/admin/realms/{realm}/groups" -URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}" - -URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes" -URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}" -URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models" -URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}" - -URL_CLIENT_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" -URL_CLIENT_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" -URL_CLIENT_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" - -URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" -URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" -URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy" -URL_AUTHENTICATION_FLOW_EXECUTIONS = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions" -URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/execution" -URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/flow" -URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication/executions/{id}/config" -URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority" -URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = 
"{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority" -URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}" - -URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances" -URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}" -URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers" -URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}" - -URL_COMPONENTS = "{url}/admin/realms/{realm}/components" -URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}" - - -def keycloak_argument_spec(): - """ - Returns argument_spec of options common to keycloak_*-modules - - :return: argument_spec dict - """ - return dict( - auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), - auth_client_id=dict(type='str', default='admin-cli'), - auth_realm=dict(type='str'), - auth_client_secret=dict(type='str', default=None, no_log=True), - auth_username=dict(type='str', aliases=['username']), - auth_password=dict(type='str', aliases=['password'], no_log=True), - validate_certs=dict(type='bool', default=True), - connection_timeout=dict(type='int', default=10), - token=dict(type='str', no_log=True), - ) - - -def camel(words): - return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:]) - - -class KeycloakError(Exception): - pass - - -def get_token(module_params): - """ Obtains connection header with token for the authentication, - token already given or obtained from credentials - :param module_params: parameters of the module - :return: connection header - """ - token = module_params.get('token') - base_url = module_params.get('auth_keycloak_url') - - if not base_url.lower().startswith(('http', 'https')): - raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." 
% base_url)
-
-    if token is None:
-        base_url = module_params.get('auth_keycloak_url')
-        validate_certs = module_params.get('validate_certs')
-        auth_realm = module_params.get('auth_realm')
-        client_id = module_params.get('auth_client_id')
-        auth_username = module_params.get('auth_username')
-        auth_password = module_params.get('auth_password')
-        client_secret = module_params.get('auth_client_secret')
-        connection_timeout = module_params.get('connection_timeout')
-        auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
-        temp_payload = {
-            'grant_type': 'password',
-            'client_id': client_id,
-            'client_secret': client_secret,
-            'username': auth_username,
-            'password': auth_password,
-        }
-        # Remove empty items, for instance missing client_secret
-        payload = dict(
-            (k, v) for k, v in temp_payload.items() if v is not None)
-        try:
-            r = json.loads(to_native(open_url(auth_url, method='POST',
-                                              validate_certs=validate_certs, timeout=connection_timeout,
-                                              data=urlencode(payload)).read()))
-        except ValueError as e:
-            raise KeycloakError(
-                'API returned invalid JSON when trying to obtain access token from %s: %s'
-                % (auth_url, str(e)))
-        except Exception as e:
-            raise KeycloakError('Could not obtain access token from %s: %s'
-                                % (auth_url, str(e)))
-
-        try:
-            token = r['access_token']
-        except KeyError:
-            raise KeycloakError(
-                'Could not obtain access token from %s' % auth_url)
-    return {
-        'Authorization': 'Bearer ' + token,
-        'Content-Type': 'application/json'
-    }
-
-
-def is_struct_included(struct1, struct2, exclude=None):
-    """
-    This function compares whether the first parameter structure is included in the second.
-    It checks every element of struct1 and validates that it is present in struct2.
-    The two structures do not need to be equal for this function to return True.
-    Elements are compared recursively.
-    :param struct1:
-        type:
-            dict for the initial call, can be dict, list, bool, int or str for recursive calls
-        description:
-            reference structure
-    :param struct2:
-        type:
-            dict for the initial call, can be dict, list, bool, int or str for recursive calls
-        description:
-            structure to compare with the first parameter.
-    :param exclude:
-        type:
-            list
-        description:
-            Keys to exclude from the comparison.
-        default: None
-    :return:
-        type:
-            bool
-        description:
-            Return True if every element of struct1 is present in struct2, False otherwise.
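-
-    A short illustrative example (values are made up for this docstring):
-
-        is_struct_included({'a': 1}, {'a': 1, 'b': 2})                  # True
-        is_struct_included({'a': 1, 'c': 3}, {'a': 1, 'b': 2})          # False
-        is_struct_included({'a': 1, 'c': 3}, {'a': 1}, exclude=['c'])   # True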
- """ - if isinstance(struct1, list) and isinstance(struct2, list): - for item1 in struct1: - if isinstance(item1, (list, dict)): - for item2 in struct2: - if not is_struct_included(item1, item2, exclude): - return False - else: - if item1 not in struct2: - return False - return True - elif isinstance(struct1, dict) and isinstance(struct2, dict): - try: - for key in struct1: - if not (exclude and key in exclude): - if not is_struct_included(struct1[key], struct2[key], exclude): - return False - return True - except KeyError: - return False - elif isinstance(struct1, bool) and isinstance(struct2, bool): - return struct1 == struct2 - else: - return to_text(struct1, 'utf-8') == to_text(struct2, 'utf-8') - - -class KeycloakAPI(object): - """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which - is obtained through OpenID connect - """ - def __init__(self, module, connection_header): - self.module = module - self.baseurl = self.module.params.get('auth_keycloak_url') - self.validate_certs = self.module.params.get('validate_certs') - self.connection_timeout = self.module.params.get('connection_timeout') - self.restheaders = connection_header - - def get_realm_info_by_id(self, realm='master'): - """ Obtain realm public info by id - - :param realm: realm id - :return: dict of real, representation or None if none matching exist - """ - realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm) - - try: - return json.loads(to_native(open_url(realm_info_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - - except HTTPError as e: - if e.code == 404: - return None - else: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) - except Exception as e: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) - - def get_realm_by_id(self, realm='master'): - """ Obtain realm representation by id - - :param realm: realm id - :return: dict of real, representation or None if none matching exist - """ - realm_url = URL_REALM.format(url=self.baseurl, realm=realm) - - try: - return json.loads(to_native(open_url(realm_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - - except HTTPError as e: - if e.code == 404: - return None - else: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) - except Exception as e: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) - - def update_realm(self, realmrep, realm="master"): - """ Update an existing realm - :param realmrep: corresponding (partial/full) realm representation with updates - :param realm: realm to be updated in Keycloak - :return: HTTPResponse object on success - """ - realm_url = URL_REALM.format(url=self.baseurl, realm=realm) - - try: - return open_url(realm_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(realmrep), 
validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) - - def create_realm(self, realmrep): - """ Create a realm in keycloak - :param realmrep: Realm representation of realm to be created. - :return: HTTPResponse object on success - """ - realm_url = URL_REALMS.format(url=self.baseurl) - - try: - return open_url(realm_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(realmrep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), - exception=traceback.format_exc()) - - def delete_realm(self, realm="master"): - """ Delete a realm from Keycloak - - :param realm: realm to be deleted - :return: HTTPResponse object on success - """ - realm_url = URL_REALM.format(url=self.baseurl, realm=realm) - - try: - return open_url(realm_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) - - def get_clients(self, realm='master', filter=None): - """ Obtains client representations for clients in a realm - - :param realm: realm to be queried - :param filter: if defined, only the client with clientId specified in the filter is returned - :return: list of dicts of client representations - """ - clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) - if filter is not None: - clientlist_url += '?clientId=%s' % filter - - try: - return json.loads(to_native(open_url(clientlist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s' - % (realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s' - % (realm, str(e))) - - def get_client_by_clientid(self, client_id, realm='master'): - """ Get client representation by clientId - :param client_id: The clientId to be queried - :param realm: realm from which to obtain the client representation - :return: dict with a client representation or None if none matching exist - """ - r = self.get_clients(realm=realm, filter=client_id) - if len(r) > 0: - return r[0] - else: - return None - - def get_client_by_id(self, id, realm='master'): - """ Obtain client representation by id - - :param id: id (not clientId) of client to be queried - :param realm: client from this realm - :return: dict of client representation or None if none matching exist - """ - client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) - - try: - return json.loads(to_native(open_url(client_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - - except HTTPError as e: - if e.code == 404: - return None - else: - self.module.fail_json(msg='Could not obtain client %s for realm %s: %s' - % (id, realm, str(e))) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s' - % (id, realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not obtain client %s for realm %s: %s' - % (id, realm, str(e))) - - 
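-    # A minimal usage sketch for the lookup helpers below (module setup and error
-    # handling elided; 'my-client' is purely illustrative):
-    #
-    #   connection_header = get_token(module.params)
-    #   kc = KeycloakAPI(module, connection_header)
-    #   rep = kc.get_client_by_clientid('my-client', realm='master')
-    #   cid = kc.get_client_id('my-client', realm='master')
-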
def get_client_id(self, client_id, realm='master'):
-        """ Obtain id of client by client_id
-
-        :param client_id: client_id of client to be queried
-        :param realm: client from this realm
-        :return: id of client (usually a UUID)
-        """
-        result = self.get_client_by_clientid(client_id, realm)
-        if isinstance(result, dict) and 'id' in result:
-            return result['id']
-        else:
-            return None
-
-    def update_client(self, id, clientrep, realm="master"):
-        """ Update an existing client
-        :param id: id (not clientId) of client to be updated in Keycloak
-        :param clientrep: corresponding (partial/full) client representation with updates
-        :param realm: realm the client is in
-        :return: HTTPResponse object on success
-        """
-        client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
-
-        try:
-            return open_url(client_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clientrep), validate_certs=self.validate_certs)
-        except Exception as e:
-            self.module.fail_json(msg='Could not update client %s in realm %s: %s'
-                                      % (id, realm, str(e)))
-
-    def create_client(self, clientrep, realm="master"):
-        """ Create a client in keycloak
-        :param clientrep: Client representation of client to be created. Must at least contain field clientId.
-        :param realm: realm for client to be created.
-        :return: HTTPResponse object on success
-        """
-        client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
-
-        try:
-            return open_url(client_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clientrep), validate_certs=self.validate_certs)
-        except Exception as e:
-            self.module.fail_json(msg='Could not create client %s in realm %s: %s'
-                                      % (clientrep['clientId'], realm, str(e)))
-
-    def delete_client(self, id, realm="master"):
-        """ Delete a client from Keycloak
-
-        :param id: id (not clientId) of client to be deleted
-        :param realm: realm of client to be deleted
-        :return: HTTPResponse object on success
-        """
-        client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
-
-        try:
-            return open_url(client_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
-        except Exception as e:
-            self.module.fail_json(msg='Could not delete client %s in realm %s: %s'
-                                      % (id, realm, str(e)))
-
-    def get_client_roles_by_id(self, cid, realm="master"):
-        """ Fetch the roles of a client on the Keycloak server.
-
-        :param cid: ID of the client from which to obtain the rolemappings.
-        :param realm: Realm from which to obtain the rolemappings.
-        :return: The role mappings of the specified client in the realm (default "master").
-        """
-        client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
-        try:
-            return json.loads(to_native(open_url(client_roles_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
-        except Exception as e:
-            self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s"
-                                      % (cid, realm, str(e)))
-
-    def get_client_role_by_name(self, gid, cid, name, realm="master"):
-        """ Get the role ID of a client.
-
-        :param gid: ID of the group from which to obtain the rolemappings.
-        :param cid: ID of the client from which to obtain the rolemappings.
-        :param name: Name of the role.
-        :param realm: Realm from which to obtain the rolemappings.
-        :return: The ID of the role, None if not found.
- """ - rolemappings = self.get_client_roles_by_id(cid, realm=realm) - for role in rolemappings: - if name == role['name']: - return role['id'] - return None - - def get_client_rolemapping_by_id(self, gid, cid, rid, realm='master'): - """ Obtain client representation by id - - :param gid: ID of the group from which to obtain the rolemappings. - :param cid: ID of the client from which to obtain the rolemappings. - :param rid: ID of the role. - :param realm: client from this realm - :return: dict of rolemapping representation or None if none matching exist - """ - rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) - try: - rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - for role in rolemappings: - if rid == role['id']: - return role - except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) - return None - - def get_client_available_rolemappings(self, gid, cid, realm="master"): - """ Fetch the available role of a client in a specified goup on the Keycloak server. - - :param gid: ID of the group from which to obtain the rolemappings. - :param cid: ID of the client from which to obtain the rolemappings. - :param realm: Realm from which to obtain the rolemappings. - :return: The rollemappings of specified group and client of the realm (default "master"). - """ - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) - try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) - - def get_client_composite_rolemappings(self, gid, cid, realm="master"): - """ Fetch the composite role of a client in a specified group on the Keycloak server. - - :param gid: ID of the group from which to obtain the rolemappings. - :param cid: ID of the client from which to obtain the rolemappings. - :param realm: Realm from which to obtain the rolemappings. - :return: The rollemappings of specified group and client of the realm (default "master"). - """ - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) - try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) - - def add_group_rolemapping(self, gid, cid, role_rep, realm="master"): - """ Fetch the composite role of a client in a specified goup on the Keycloak server. - - :param gid: ID of the group from which to obtain the rolemappings. - :param cid: ID of the client from which to obtain the rolemappings. - :param role_rep: Representation of the role to assign. - :param realm: Realm from which to obtain the rolemappings. - :return: None. 
- """ - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) - try: - open_url(available_rolemappings_url, method="POST", headers=self.restheaders, data=json.dumps(role_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) - except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) - - def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"): - """ Delete the rolemapping of a client in a specified group on the Keycloak server. - - :param gid: ID of the group from which to obtain the rolemappings. - :param cid: ID of the client from which to obtain the rolemappings. - :param role_rep: Representation of the role to assign. - :param realm: Realm from which to obtain the rolemappings. - :return: None. - """ - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) - try: - open_url(available_rolemappings_url, method="DELETE", headers=self.restheaders, - validate_certs=self.validate_certs, timeout=self.connection_timeout) - except Exception as e: - self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) - - def get_client_templates(self, realm='master'): - """ Obtains client template representations for client templates in a realm - - :param realm: realm to be queried - :return: list of dicts of client representations - """ - url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) - - try: - return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s' - % (realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s' - % (realm, str(e))) - - def get_client_template_by_id(self, id, realm='master'): - """ Obtain client template representation by id - - :param id: id (not name) of client template to be queried - :param realm: client template from this realm - :return: dict of client template representation or None if none matching exist - """ - url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm) - - try: - return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s' - % (id, realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s' - % (id, realm, str(e))) - - def get_client_template_by_name(self, name, realm='master'): - """ Obtain client template representation by name - - :param name: name of client template to be queried - :param realm: client template from this realm - :return: dict of client template representation or None if none matching exist - """ - result = self.get_client_templates(realm) - if isinstance(result, list): - result = [x for x in result if x['name'] == name] - if len(result) > 0: - return result[0] - return None - - def get_client_template_id(self, name, realm='master'): - """ Obtain client template id 
by name - - :param name: name of client template to be queried - :param realm: client template from this realm - :return: client template id (usually a UUID) - """ - result = self.get_client_template_by_name(name, realm) - if isinstance(result, dict) and 'id' in result: - return result['id'] - else: - return None - - def update_client_template(self, id, clienttrep, realm="master"): - """ Update an existing client template - :param id: id (not name) of client template to be updated in Keycloak - :param clienttrep: corresponding (partial/full) client template representation with updates - :param realm: realm the client template is in - :return: HTTPResponse object on success - """ - url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id) - - try: - return open_url(url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clienttrep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not update client template %s in realm %s: %s' - % (id, realm, str(e))) - - def create_client_template(self, clienttrep, realm="master"): - """ Create a client in keycloak - :param clienttrep: Client template representation of client template to be created. Must at least contain field name - :param realm: realm for client template to be created in - :return: HTTPResponse object on success - """ - url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) - - try: - return open_url(url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clienttrep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not create client template %s in realm %s: %s' - % (clienttrep['clientId'], realm, str(e))) - - def delete_client_template(self, id, realm="master"): - """ Delete a client template from Keycloak - - :param id: id (not name) of client to be deleted - :param realm: realm of client template to be deleted - :return: HTTPResponse object on success - """ - url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id) - - try: - return open_url(url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not delete client template %s in realm %s: %s' - % (id, realm, str(e))) - - def get_clientscopes(self, realm="master"): - """ Fetch the name and ID of all clientscopes on the Keycloak server. - - To fetch the full data of the group, make a subsequent call to - get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return. - - :param realm: Realm in which the clientscope resides; default 'master'. - :return The clientscopes of this realm (default "master") - """ - clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) - try: - return json.loads(to_native(open_url(clientscopes_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except Exception as e: - self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s" - % (realm, str(e))) - - def get_clientscope_by_clientscopeid(self, cid, realm="master"): - """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. - - If the clientscope does not exist, None is returned. 
-
-        cid is a UUID provided by the Keycloak API
-        :param cid: UUID of the clientscope to be returned
-        :param realm: Realm in which the clientscope resides; default 'master'.
-        """
-        clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid)
-        try:
-            return json.loads(to_native(open_url(clientscope_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
-
-        except HTTPError as e:
-            if e.code == 404:
-                return None
-            else:
-                self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
-                                          % (cid, realm, str(e)))
-        except Exception as e:
-            self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
-                                      % (cid, realm, str(e)))
-
-    def get_clientscope_by_name(self, name, realm="master"):
-        """ Fetch a keycloak clientscope within a realm based on its name.
-
-        The Keycloak API does not allow filtering of the clientscopes resource by name.
-        As a result, this method first retrieves the entire list of clientscopes - name and ID -
-        then performs a second query to fetch the clientscope.
-
-        If the clientscope does not exist, None is returned.
-        :param name: Name of the clientscope to fetch.
-        :param realm: Realm in which the clientscope resides; default 'master'
-        """
-        try:
-            all_clientscopes = self.get_clientscopes(realm=realm)
-
-            for clientscope in all_clientscopes:
-                if clientscope['name'] == name:
-                    return self.get_clientscope_by_clientscopeid(clientscope['id'], realm=realm)
-
-            return None
-
-        except Exception as e:
-            self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
-                                      % (name, realm, str(e)))
-
-    def create_clientscope(self, clientscoperep, realm="master"):
-        """ Create a Keycloak clientscope.
-
-        :param clientscoperep: a ClientScopeRepresentation of the clientscope to be created. Must contain at minimum the field name.
-        :return: HTTPResponse object on success
-        """
-        clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
-        try:
-            return open_url(clientscopes_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
-        except Exception as e:
-            self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s"
-                                      % (clientscoperep['name'], realm, str(e)))
-
-    def update_clientscope(self, clientscoperep, realm="master"):
-        """ Update an existing clientscope.
-
-        :param clientscoperep: A ClientScopeRepresentation of the updated clientscope.
-        :return HTTPResponse object on success
-        """
-        clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id'])
-
-        try:
-            return open_url(clientscope_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
-
-        except Exception as e:
-            self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s'
-                                      % (clientscoperep['name'], realm, str(e)))
-
-    def delete_clientscope(self, name=None, cid=None, realm="master"):
-        """ Delete a clientscope. One of name or cid must be provided.
-
-        Providing the clientscope ID is preferred as it avoids a second lookup to
-        convert a clientscope name to an ID.
-
-        :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID.
-        :param cid: The ID of the clientscope (preferred to name).
-        :param realm: The realm in which this clientscope resides, default "master".
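-
-        Illustrative calls (the name and ID are placeholders):
-
-            kc.delete_clientscope(cid='<clientscope-uuid>')   # preferred: by ID
-            kc.delete_clientscope(name='my-scope')            # falls back to a name lookup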
- """ - - if cid is None and name is None: - # prefer an exception since this is almost certainly a programming error in the module itself. - raise Exception("Unable to delete group - one of group ID or name must be provided.") - - # only lookup the name if cid isn't provided. - # in the case that both are provided, prefer the ID, since it's one - # less lookup. - if cid is None and name is not None: - for clientscope in self.get_clientscopes(realm=realm): - if clientscope['name'] == name: - cid = clientscope['id'] - break - - # if the group doesn't exist - no problem, nothing to delete. - if cid is None: - return None - - # should have a good cid by here. - clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl) - try: - return open_url(clientscope_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) - - except Exception as e: - self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e))) - - def get_clientscope_protocolmappers(self, cid, realm="master"): - """ Fetch the name and ID of all clientscopes on the Keycloak server. - - To fetch the full data of the group, make a subsequent call to - get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return. - - :param cid: id of clientscope (not name). - :param realm: Realm in which the clientscope resides; default 'master'. - :return The protocolmappers of this realm (default "master") - """ - protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm) - try: - return json.loads(to_native(open_url(protocolmappers_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except Exception as e: - self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s" - % (realm, str(e))) - - def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"): - """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. - - If the clientscope does not exist, None is returned. - - gid is a UUID provided by the Keycloak API - - :param cid: UUID of the protocolmapper to be returned - :param cid: UUID of the clientscope to be returned - :param realm: Realm in which the clientscope resides; default 'master'. - """ - protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid) - try: - return json.loads(to_native(open_url(protocolmapper_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - - except HTTPError as e: - if e.code == 404: - return None - else: - self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" - % (pid, realm, str(e))) - except Exception as e: - self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" - % (cid, realm, str(e))) - - def get_clientscope_protocolmapper_by_name(self, cid, name, realm="master"): - """ Fetch a keycloak clientscope within a realm based on its name. - - The Keycloak API does not allow filtering of the clientscopes resource by name. - As a result, this method first retrieves the entire list of clientscopes - name and ID - - then performs a second query to fetch the group. - - If the clientscope does not exist, None is returned. - :param cid: Id of the clientscope (not name). - :param name: Name of the protocolmapper to fetch. 
- :param realm: Realm in which the clientscope resides; default 'master' - """ - try: - all_protocolmappers = self.get_clientscope_protocolmappers(cid, realm=realm) - - for protocolmapper in all_protocolmappers: - if protocolmapper['name'] == name: - return self.get_clientscope_protocolmapper_by_protocolmapperid(protocolmapper['id'], cid, realm=realm) - - return None - - except Exception as e: - self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" - % (name, realm, str(e))) - - def create_clientscope_protocolmapper(self, cid, mapper_rep, realm="master"): - """ Create a Keycloak clientscope protocolmapper. - - :param cid: Id of the clientscope. - :param mapper_rep: a ProtocolMapperRepresentation of the protocolmapper to be created. Must contain at minimum the field name. - :return: HTTPResponse object on success - """ - protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm) - try: - return open_url(protocolmappers_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper_rep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s" - % (mapper_rep['name'], realm, str(e))) - - def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"): - """ Update an existing clientscope. - - :param cid: Id of the clientscope. - :param mapper_rep: A ProtocolMapperRepresentation of the updated protocolmapper. - :return HTTPResponse object on success - """ - protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id']) - - try: - return open_url(protocolmapper_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper_rep), validate_certs=self.validate_certs) - - except Exception as e: - self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s' - % (mapper_rep, realm, str(e))) - - def get_groups(self, realm="master"): - """ Fetch the name and ID of all groups on the Keycloak server. - - To fetch the full data of the group, make a subsequent call to - get_group_by_groupid, passing in the ID of the group you wish to return. - - :param realm: Return the groups of this realm (default "master"). - """ - groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) - try: - return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except Exception as e: - self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s" - % (realm, str(e))) - - def get_group_by_groupid(self, gid, realm="master"): - """ Fetch a keycloak group from the provided realm using the group's unique ID. - - If the group does not exist, None is returned. - - gid is a UUID provided by the Keycloak API - :param gid: UUID of the group to be returned - :param realm: Realm in which the group resides; default 'master'. 
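-
-        Illustrative call (the ID is a placeholder):
-
-            group = kc.get_group_by_groupid('<group-uuid>', realm='master')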
- """ - groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid) - try: - return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - - except HTTPError as e: - if e.code == 404: - return None - else: - self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" - % (gid, realm, str(e))) - except Exception as e: - self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" - % (gid, realm, str(e))) - - def get_group_by_name(self, name, realm="master"): - """ Fetch a keycloak group within a realm based on its name. - - The Keycloak API does not allow filtering of the Groups resource by name. - As a result, this method first retrieves the entire list of groups - name and ID - - then performs a second query to fetch the group. - - If the group does not exist, None is returned. - :param name: Name of the group to fetch. - :param realm: Realm in which the group resides; default 'master' - """ - groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) - try: - all_groups = self.get_groups(realm=realm) - - for group in all_groups: - if group['name'] == name: - return self.get_group_by_groupid(group['id'], realm=realm) - - return None - - except Exception as e: - self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" - % (name, realm, str(e))) - - def create_group(self, grouprep, realm="master"): - """ Create a Keycloak group. - - :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name. - :return: HTTPResponse object on success - """ - groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) - try: - return open_url(groups_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg="Could not create group %s in realm %s: %s" - % (grouprep['name'], realm, str(e))) - - def update_group(self, grouprep, realm="master"): - """ Update an existing group. - - :param grouprep: A GroupRepresentation of the updated group. - :return HTTPResponse object on success - """ - group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id']) - - try: - return open_url(group_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not update group %s in realm %s: %s' - % (grouprep['name'], realm, str(e))) - - def delete_group(self, name=None, groupid=None, realm="master"): - """ Delete a group. One of name or groupid must be provided. - - Providing the group ID is preferred as it avoids a second lookup to - convert a group name to an ID. - - :param name: The name of the group. A lookup will be performed to retrieve the group ID. - :param groupid: The ID of the group (preferred to name). - :param realm: The realm in which this group resides, default "master". - """ - - if groupid is None and name is None: - # prefer an exception since this is almost certainly a programming error in the module itself. - raise Exception("Unable to delete group - one of group ID or name must be provided.") - - # only lookup the name if groupid isn't provided. - # in the case that both are provided, prefer the ID, since it's one - # less lookup. 
- if groupid is None and name is not None: - for group in self.get_groups(realm=realm): - if group['name'] == name: - groupid = group['id'] - break - - # if the group doesn't exist - no problem, nothing to delete. - if groupid is None: - return None - - # should have a good groupid by here. - group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl) - try: - return open_url(group_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) - - def get_realm_roles(self, realm='master'): - """ Obtains role representations for roles in a realm - - :param realm: realm to be queried - :return: list of dicts of role representations - """ - rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) - try: - return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s' - % (realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s' - % (realm, str(e))) - - def get_realm_role(self, name, realm='master'): - """ Fetch a keycloak role from the provided realm using the role's name. - - If the role does not exist, None is returned. - :param name: Name of the role to fetch. - :param realm: Realm in which the role resides; default 'master'. - """ - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name)) - try: - return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except HTTPError as e: - if e.code == 404: - return None - else: - self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' - % (name, realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' - % (name, realm, str(e))) - - def create_realm_role(self, rolerep, realm='master'): - """ Create a Keycloak realm role. - - :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name. - :return: HTTPResponse object on success - """ - roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) - try: - return open_url(roles_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not create role %s in realm %s: %s' - % (rolerep['name'], realm, str(e))) - - def update_realm_role(self, rolerep, realm='master'): - """ Update an existing realm role. - - :param rolerep: A RoleRepresentation of the updated role. - :return HTTPResponse object on success - """ - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name'])) - try: - return open_url(role_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not update role %s in realm %s: %s' - % (rolerep['name'], realm, str(e))) - - def delete_realm_role(self, name, realm='master'): - """ Delete a realm role. - - :param name: The name of the role. 
- :param realm: The realm in which this role resides, default "master". - """ - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name)) - try: - return open_url(role_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Unable to delete role %s in realm %s: %s' - % (name, realm, str(e))) - - def get_client_roles(self, clientid, realm='master'): - """ Obtains role representations for client roles in a specific client - - :param clientid: Client id to be queried - :param realm: Realm to be queried - :return: List of dicts of role representations - """ - cid = self.get_client_id(clientid, realm=realm) - if cid is None: - self.module.fail_json(msg='Could not find client %s in realm %s' - % (clientid, realm)) - rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) - try: - return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s' - % (clientid, realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s' - % (clientid, realm, str(e))) - - def get_client_role(self, name, clientid, realm='master'): - """ Fetch a keycloak client role from the provided realm using the role's name. - - :param name: Name of the role to fetch. - :param clientid: Client id for the client role - :param realm: Realm in which the role resides - :return: Dict of role representation - If the role does not exist, None is returned. - """ - cid = self.get_client_id(clientid, realm=realm) - if cid is None: - self.module.fail_json(msg='Could not find client %s in realm %s' - % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name)) - try: - return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except HTTPError as e: - if e.code == 404: - return None - else: - self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s' - % (name, clientid, realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s' - % (name, clientid, realm, str(e))) - - def create_client_role(self, rolerep, clientid, realm='master'): - """ Create a Keycloak client role. - - :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name. 
-        :param clientid: Client id for the client role
-        :param realm: Realm in which the role resides
-        :return: HTTPResponse object on success
-        """
-        cid = self.get_client_id(clientid, realm=realm)
-        if cid is None:
-            self.module.fail_json(msg='Could not find client %s in realm %s'
-                                      % (clientid, realm))
-        roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
-        try:
-            return open_url(roles_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(rolerep), validate_certs=self.validate_certs)
-        except Exception as e:
-            self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s'
-                                      % (rolerep['name'], clientid, realm, str(e)))
-
-    def update_client_role(self, rolerep, clientid, realm="master"):
-        """ Update an existing client role.
-
-        :param rolerep: A RoleRepresentation of the updated role.
-        :param clientid: Client id for the client role
-        :param realm: Realm in which the role resides
-        :return HTTPResponse object on success
-        """
-        cid = self.get_client_id(clientid, realm=realm)
-        if cid is None:
-            self.module.fail_json(msg='Could not find client %s in realm %s'
-                                      % (clientid, realm))
-        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name']))
-        try:
-            return open_url(role_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(rolerep), validate_certs=self.validate_certs)
-        except Exception as e:
-            self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s'
-                                      % (rolerep['name'], clientid, realm, str(e)))
-
-    def delete_client_role(self, name, clientid, realm="master"):
-        """ Delete a client role by name.
-
-        :param name: The name of the role.
-        :param clientid: Client id for the client role
-        :param realm: Realm in which the role resides
-        """
-        cid = self.get_client_id(clientid, realm=realm)
-        if cid is None:
-            self.module.fail_json(msg='Could not find client %s in realm %s'
-                                      % (clientid, realm))
-        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
-        try:
-            return open_url(role_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
-        except Exception as e:
-            self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s'
-                                      % (name, clientid, realm, str(e)))
-
-    def get_authentication_flow_by_alias(self, alias, realm='master'):
-        """
-        Get an authentication flow by its alias
-        :param alias: Alias of the authentication flow to get.
-        :param realm: Realm.
-        :return: Authentication flow representation.
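-
-        Illustrative call ('browser' is one of Keycloak's built-in flow aliases):
-
-            flow = kc.get_authentication_flow_by_alias('browser', realm='master')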
- """ - try: - authentication_flow = {} - # Check if the authentication flow exists on the Keycloak serveraders - authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET', - headers=self.restheaders, timeout=self.connection_timeout)) - for authentication in authentications: - if authentication["alias"] == alias: - authentication_flow = authentication - break - return authentication_flow - except Exception as e: - self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e))) - - def delete_authentication_flow_by_id(self, id, realm='master'): - """ - Delete an authentication flow from Keycloak - :param id: id of authentication flow to be deleted - :param realm: realm of client to be deleted - :return: HTTPResponse object on success - """ - flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id) - - try: - return open_url(flow_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s' - % (id, realm, str(e))) - - def copy_auth_flow(self, config, realm='master'): - """ - Create a new authentication flow from a copy of another. - :param config: Representation of the authentication flow to create. - :param realm: Realm. - :return: Representation of the new authentication flow. - """ - try: - new_name = dict( - newName=config["alias"] - ) - open_url( - URL_AUTHENTICATION_FLOW_COPY.format( - url=self.baseurl, - realm=realm, - copyfrom=quote(config["copyFrom"])), - method='POST', - headers=self.restheaders, - data=json.dumps(new_name), - timeout=self.connection_timeout) - flow_list = json.load( - open_url( - URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, - realm=realm), - method='GET', - headers=self.restheaders, - timeout=self.connection_timeout)) - for flow in flow_list: - if flow["alias"] == config["alias"]: - return flow - return None - except Exception as e: - self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) - - def create_empty_auth_flow(self, config, realm='master'): - """ - Create a new empty authentication flow. - :param config: Representation of the authentication flow to create. - :param realm: Realm. - :return: Representation of the new authentication flow. 
- """ - try: - new_flow = dict( - alias=config["alias"], - providerId=config["providerId"], - description=config["description"], - topLevel=True - ) - open_url( - URL_AUTHENTICATION_FLOWS.format( - url=self.baseurl, - realm=realm), - method='POST', - headers=self.restheaders, - data=json.dumps(new_flow), - timeout=self.connection_timeout) - flow_list = json.load( - open_url( - URL_AUTHENTICATION_FLOWS.format( - url=self.baseurl, - realm=realm), - method='GET', - headers=self.restheaders, - timeout=self.connection_timeout)) - for flow in flow_list: - if flow["alias"] == config["alias"]: - return flow - return None - except Exception as e: - self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) - - def update_authentication_executions(self, flowAlias, updatedExec, realm='master'): - """ Update authentication executions - - :param flowAlias: name of the parent flow - :param updatedExec: JSON containing updated execution - :return: HTTPResponse object on success - """ - try: - open_url( - URL_AUTHENTICATION_FLOW_EXECUTIONS.format( - url=self.baseurl, - realm=realm, - flowalias=quote(flowAlias)), - method='PUT', - headers=self.restheaders, - data=json.dumps(updatedExec), - timeout=self.connection_timeout) - except Exception as e: - self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e))) - - def add_authenticationConfig_to_execution(self, executionId, authenticationConfig, realm='master'): - """ Add autenticatorConfig to the execution - - :param executionId: id of execution - :param authenticationConfig: config to add to the execution - :return: HTTPResponse object on success - """ - try: - open_url( - URL_AUTHENTICATION_EXECUTION_CONFIG.format( - url=self.baseurl, - realm=realm, - id=executionId), - method='POST', - headers=self.restheaders, - data=json.dumps(authenticationConfig), - timeout=self.connection_timeout) - except Exception as e: - self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) - - def create_subflow(self, subflowName, flowAlias, realm='master'): - """ Create new sublow on the flow - - :param subflowName: name of the subflow to create - :param flowAlias: name of the parent flow - :return: HTTPResponse object on success - """ - try: - newSubFlow = {} - newSubFlow["alias"] = subflowName - newSubFlow["provider"] = "registration-page-form" - newSubFlow["type"] = "basic-flow" - open_url( - URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format( - url=self.baseurl, - realm=realm, - flowalias=quote(flowAlias)), - method='POST', - headers=self.restheaders, - data=json.dumps(newSubFlow), - timeout=self.connection_timeout) - except Exception as e: - self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) - - def create_execution(self, execution, flowAlias, realm='master'): - """ Create new execution on the flow - - :param execution: name of execution to create - :param flowAlias: name of the parent flow - :return: HTTPResponse object on success - """ - try: - newExec = {} - newExec["provider"] = execution["providerId"] - newExec["requirement"] = execution["requirement"] - open_url( - URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format( - url=self.baseurl, - realm=realm, - flowalias=quote(flowAlias)), - method='POST', - headers=self.restheaders, - data=json.dumps(newExec), - timeout=self.connection_timeout) - except Exception as e: - self.module.fail_json(msg="Unable to create new execution %s: %s" % 
(execution["provider"], str(e))) - - def change_execution_priority(self, executionId, diff, realm='master'): - """ Raise or lower execution priority of diff time - - :param executionId: id of execution to lower priority - :param realm: realm the client is in - :param diff: Integer number, raise of diff time if positive lower of diff time if negative - :return: HTTPResponse object on success - """ - try: - if diff > 0: - for i in range(diff): - open_url( - URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format( - url=self.baseurl, - realm=realm, - id=executionId), - method='POST', - headers=self.restheaders, - timeout=self.connection_timeout) - elif diff < 0: - for i in range(-diff): - open_url( - URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format( - url=self.baseurl, - realm=realm, - id=executionId), - method='POST', - headers=self.restheaders, - timeout=self.connection_timeout) - except Exception as e: - self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e))) - - def get_executions_representation(self, config, realm='master'): - """ - Get a representation of the executions for an authentication flow. - :param config: Representation of the authentication flow - :param realm: Realm - :return: Representation of the executions - """ - try: - # Get executions created - executions = json.load( - open_url( - URL_AUTHENTICATION_FLOW_EXECUTIONS.format( - url=self.baseurl, - realm=realm, - flowalias=quote(config["alias"])), - method='GET', - headers=self.restheaders, - timeout=self.connection_timeout)) - for execution in executions: - if "authenticationConfig" in execution: - execConfigId = execution["authenticationConfig"] - execConfig = json.load( - open_url( - URL_AUTHENTICATION_CONFIG.format( - url=self.baseurl, - realm=realm, - id=execConfigId), - method='GET', - headers=self.restheaders, - timeout=self.connection_timeout)) - execution["authenticationConfig"] = execConfig - return executions - except Exception as e: - self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) - - def get_identity_providers(self, realm='master'): - """ Fetch representations for identity providers in a realm - :param realm: realm to be queried - :return: list of representations for identity providers - """ - idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) - try: - return json.loads(to_native(open_url(idps_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s' - % (realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s' - % (realm, str(e))) - - def get_identity_provider(self, alias, realm='master'): - """ Fetch identity provider representation from a realm using the idp's alias. - If the identity provider does not exist, None is returned. - :param alias: Alias of the identity provider to fetch. - :param realm: Realm in which the identity provider resides; default 'master'. 
- """ - idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) - try: - return json.loads(to_native(open_url(idp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - except HTTPError as e: - if e.code == 404: - return None - else: - self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' - % (alias, realm, str(e))) - except Exception as e: - self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' - % (alias, realm, str(e))) - - def create_identity_provider(self, idprep, realm='master'): - """ Create an identity provider. - :param idprep: Identity provider representation of the idp to be created. - :param realm: Realm in which this identity provider resides, default "master". - :return: HTTPResponse object on success - """ - idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) - try: - return open_url(idps_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(idprep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) - - def update_identity_provider(self, idprep, realm='master'): - """ Update an existing identity provider. - :param idprep: Identity provider representation of the idp to be updated. - :param realm: Realm in which this identity provider resides, default "master". - :return HTTPResponse object on success - """ - idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias']) - try: - return open_url(idp_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(idprep), validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) - - def delete_identity_provider(self, alias, realm='master'): - """ Delete an identity provider. - :param alias: Alias of the identity provider. - :param realm: Realm in which this identity provider resides, default "master". - """ - idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) - try: - return open_url(idp_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) - except Exception as e: - self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s' - % (alias, realm, str(e))) - - def get_identity_provider_mappers(self, alias, realm='master'): - """ Fetch representations for identity provider mappers - :param alias: Alias of the identity provider. 
- :param realm: realm to be queried
- :return: list of representations for identity provider mappers
- """
- mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
- try:
- return json.loads(to_native(open_url(mappers_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s'
- % (alias, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
- % (alias, realm, str(e)))
-
- def get_identity_provider_mapper(self, mid, alias, realm='master'):
- """ Fetch an identity provider mapper representation using the mapper's unique ID.
- If the mapper does not exist, None is returned.
- :param mid: Unique ID of the mapper to fetch.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which the identity provider resides; default 'master'.
- """
- mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
- try:
- return json.loads(to_native(open_url(mapper_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
- % (mid, alias, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
- % (mid, alias, realm, str(e)))
-
- def create_identity_provider_mapper(self, mapper, alias, realm='master'):
- """ Create an identity provider mapper.
- :param mapper: IdentityProviderMapperRepresentation of the mapper to be created.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which this identity provider resides, default "master".
- :return: HTTPResponse object on success
- """
- mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
- try:
- return open_url(mappers_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s'
- % (mapper['name'], alias, realm, str(e)))
-
- def update_identity_provider_mapper(self, mapper, alias, realm='master'):
- """ Update an existing identity provider mapper.
- :param mapper: IdentityProviderMapperRepresentation of the mapper to be updated.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which this identity provider resides, default "master".
- :return HTTPResponse object on success
- """
- mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id'])
- try:
- return open_url(mapper_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s'
- % (mapper['id'], alias, realm, str(e)))
-
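
The mapper helpers above follow the same reconcile pattern as the client-role helpers earlier in the file. A minimal sketch, assuming the authenticated `kc` instance and an existing identity provider 'corp-saml' in realm 'apps' (both hypothetical); note that update_identity_provider_mapper() needs the mapper's id, so the existing representation is merged in:

    wanted = {
        'name': 'email-attr',
        'identityProviderAlias': 'corp-saml',
        'identityProviderMapper': 'saml-user-attribute-idp-mapper',
        'config': {'attribute.name': 'mail', 'user.attribute': 'email'},
    }
    current = next((m for m in kc.get_identity_provider_mappers('corp-saml', realm='apps')
                    if m['name'] == wanted['name']), None)
    if current is None:
        kc.create_identity_provider_mapper(wanted, 'corp-saml', realm='apps')
    else:
        kc.update_identity_provider_mapper(dict(current, **wanted), 'corp-saml', realm='apps')
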
- def delete_identity_provider_mapper(self, mid, alias, realm='master'):
- """ Delete an identity provider mapper.
- :param mid: Unique ID of the mapper to delete.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which this identity provider resides, default "master".
- """
- mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
- try:
- return open_url(mapper_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
- % (mid, alias, realm, str(e)))
-
- def get_components(self, filter=None, realm='master'):
- """ Fetch representations for components in a realm
- :param realm: realm to be queried
- :param filter: search filter
- :return: list of representations for components
- """
- comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
- if filter is not None:
- comps_url += '?%s' % filter
-
- try:
- return json.loads(to_native(open_url(comps_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s'
- % (realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of components for realm %s: %s'
- % (realm, str(e)))
-
- def get_component(self, cid, realm='master'):
- """ Fetch component representation from a realm using its cid.
- If the component does not exist, None is returned.
- :param cid: Unique ID of the component to fetch.
- :param realm: Realm in which the component resides; default 'master'.
- """
- comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
- % (cid, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
- % (cid, realm, str(e)))
-
- def create_component(self, comprep, realm='master'):
- """ Create a component.
- :param comprep: Component representation of the component to be created.
- :param realm: Realm in which this component resides, default "master".
- :return: Component representation of the created component
- """
- comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
- try:
- resp = open_url(comps_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(comprep), validate_certs=self.validate_certs)
- comp_url = resp.getheader('Location')
- if comp_url is None:
- self.module.fail_json(msg='Could not create component in realm %s: %s'
- % (realm, 'unexpected response'))
- return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg='Could not create component in realm %s: %s'
- % (realm, str(e)))
-
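
get_components() passes its filter argument through verbatim as the URL query string, and create_component() returns the full representation of what it created (it re-reads the Location header). A minimal sketch, assuming the authenticated `kc` instance; the component name, realm, and LDAP URL are hypothetical:

    query = 'name=ldap-directory&type=org.keycloak.storage.UserStorageProvider'
    if not kc.get_components(filter=query, realm='apps'):
        created = kc.create_component({
            'name': 'ldap-directory',
            'providerId': 'ldap',
            'providerType': 'org.keycloak.storage.UserStorageProvider',
            'config': {'connectionUrl': ['ldaps://ldap.example.com']},
        }, realm='apps')
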
- def update_component(self, comprep, realm='master'):
- """ Update an existing component.
- :param comprep: Component representation of the component to be updated.
- :param realm: Realm in which this component resides, default "master".
- :return HTTPResponse object on success
- """
- cid = comprep.get('id')
- if cid is None:
- self.module.fail_json(msg='Cannot update component without id')
- comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return open_url(comp_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(comprep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update component %s in realm %s: %s'
- % (cid, realm, str(e)))
-
- def delete_component(self, cid, realm='master'):
- """ Delete a component.
- :param cid: Unique ID of the component.
- :param realm: Realm in which this component resides, default "master".
- """
- comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return open_url(comp_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
- % (cid, realm, str(e)))
diff --git a/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py
deleted file mode 100644
index 04b08ae5..00000000
--- a/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
-
-
-class iLORedfishUtils(RedfishUtils):
-
- def get_ilo_sessions(self):
- result = {}
- # listing all users has always been slower than other operations, why?
- session_list = []
- sessions_results = []
- # Get these entries, but does not fail if not found
- properties = ['Description', 'Id', 'Name', 'UserName']
-
- # Changed self.sessions_uri to Hardcoded string.
- response = self.get_request( - self.root_uri + self.service_root + "SessionService/Sessions/") - if not response['ret']: - return response - result['ret'] = True - data = response['data'] - - if 'Oem' in data: - if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]: - current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"] - - for sessions in data[u'Members']: - # session_list[] are URIs - session_list.append(sessions[u'@odata.id']) - # for each session, get details - for uri in session_list: - session = {} - if uri != current_session: - response = self.get_request(self.root_uri + uri) - if not response['ret']: - return response - data = response['data'] - for property in properties: - if property in data: - session[property] = data[property] - sessions_results.append(session) - result["msg"] = sessions_results - result["ret"] = True - return result - - def set_ntp_server(self, mgr_attributes): - result = {} - setkey = mgr_attributes['mgr_attr_name'] - - nic_info = self.get_manager_ethernet_uri() - ethuri = nic_info["nic_addr"] - - response = self.get_request(self.root_uri + ethuri) - if not response['ret']: - return response - result['ret'] = True - data = response['data'] - payload = {"DHCPv4": { - "UseNTPServers": "" - }} - - if data["DHCPv4"]["UseNTPServers"]: - payload["DHCPv4"]["UseNTPServers"] = False - res_dhv4 = self.patch_request(self.root_uri + ethuri, payload) - if not res_dhv4['ret']: - return res_dhv4 - - payload = {"DHCPv6": { - "UseNTPServers": "" - }} - - if data["DHCPv6"]["UseNTPServers"]: - payload["DHCPv6"]["UseNTPServers"] = False - res_dhv6 = self.patch_request(self.root_uri + ethuri, payload) - if not res_dhv6['ret']: - return res_dhv6 - - datetime_uri = self.manager_uri + "DateTime" - - response = self.get_request(self.root_uri + datetime_uri) - if not response['ret']: - return response - - data = response['data'] - - ntp_list = data[setkey] - if(len(ntp_list) == 2): - ntp_list.pop(0) - - ntp_list.append(mgr_attributes['mgr_attr_value']) - - payload = {setkey: ntp_list} - - response1 = self.patch_request(self.root_uri + datetime_uri, payload) - if not response1['ret']: - return response1 - - return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']} - - def set_time_zone(self, attr): - key = attr['mgr_attr_name'] - - uri = self.manager_uri + "DateTime/" - response = self.get_request(self.root_uri + uri) - if not response['ret']: - return response - - data = response["data"] - - if key not in data: - return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key} - - timezones = data["TimeZoneList"] - index = "" - for tz in timezones: - if attr['mgr_attr_value'] in tz["Name"]: - index = tz["Index"] - break - - payload = {key: {"Index": index}} - response = self.patch_request(self.root_uri + uri, payload) - if not response['ret']: - return response - - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} - - def set_dns_server(self, attr): - key = attr['mgr_attr_name'] - nic_info = self.get_manager_ethernet_uri() - uri = nic_info["nic_addr"] - - response = self.get_request(self.root_uri + uri) - if not response['ret']: - return response - - data = response['data'] - - dns_list = data["Oem"]["Hpe"]["IPv4"][key] - - if len(dns_list) == 3: - dns_list.pop(0) - - dns_list.append(attr['mgr_attr_value']) - - payload = { - "Oem": { - "Hpe": { - "IPv4": { - key: dns_list - } - } - } - } - - response = self.patch_request(self.root_uri + uri, payload) - if not response['ret']: - 
return response - - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} - - def set_domain_name(self, attr): - key = attr['mgr_attr_name'] - - nic_info = self.get_manager_ethernet_uri() - ethuri = nic_info["nic_addr"] - - response = self.get_request(self.root_uri + ethuri) - if not response['ret']: - return response - - data = response['data'] - - payload = {"DHCPv4": { - "UseDomainName": "" - }} - - if data["DHCPv4"]["UseDomainName"]: - payload["DHCPv4"]["UseDomainName"] = False - res_dhv4 = self.patch_request(self.root_uri + ethuri, payload) - if not res_dhv4['ret']: - return res_dhv4 - - payload = {"DHCPv6": { - "UseDomainName": "" - }} - - if data["DHCPv6"]["UseDomainName"]: - payload["DHCPv6"]["UseDomainName"] = False - res_dhv6 = self.patch_request(self.root_uri + ethuri, payload) - if not res_dhv6['ret']: - return res_dhv6 - - domain_name = attr['mgr_attr_value'] - - payload = {"Oem": { - "Hpe": { - key: domain_name - } - }} - - response = self.patch_request(self.root_uri + ethuri, payload) - if not response['ret']: - return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} - - def set_wins_registration(self, mgrattr): - Key = mgrattr['mgr_attr_name'] - - nic_info = self.get_manager_ethernet_uri() - ethuri = nic_info["nic_addr"] - - payload = { - "Oem": { - "Hpe": { - "IPv4": { - Key: False - } - } - } - } - - response = self.patch_request(self.root_uri + ethuri, payload) - if not response['ret']: - return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']} diff --git a/ansible_collections/community/general/plugins/module_utils/influxdb.py b/ansible_collections/community/general/plugins/module_utils/influxdb.py deleted file mode 100644 index c171131a..00000000 --- a/ansible_collections/community/general/plugins/module_utils/influxdb.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import traceback - -from ansible.module_utils.basic import missing_required_lib - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests.exceptions - HAS_REQUESTS = True -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - HAS_REQUESTS = False - -INFLUXDB_IMP_ERR = None -try: - from influxdb import InfluxDBClient - from influxdb import __version__ as influxdb_version - from influxdb import exceptions - HAS_INFLUXDB = True -except ImportError: - INFLUXDB_IMP_ERR = traceback.format_exc() - HAS_INFLUXDB = False - - -class InfluxDb(): - def __init__(self, module): - self.module = module - self.params = self.module.params - self.check_lib() - self.hostname = self.params['hostname'] - self.port = self.params['port'] - self.path = self.params['path'] - self.username = self.params['username'] - self.password = self.params['password'] - self.database_name = self.params.get('database_name') - - def check_lib(self): - if not HAS_REQUESTS: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - - if not HAS_INFLUXDB: - self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR) - - @staticmethod - def influxdb_argument_spec(): - return dict( - hostname=dict(type='str', 
default='localhost'), - port=dict(type='int', default=8086), - path=dict(type='str', default=''), - username=dict(type='str', default='root', aliases=['login_username']), - password=dict(type='str', default='root', no_log=True, aliases=['login_password']), - ssl=dict(type='bool', default=False), - validate_certs=dict(type='bool', default=True), - timeout=dict(type='int'), - retries=dict(type='int', default=3), - proxies=dict(type='dict', default={}), - use_udp=dict(type='bool', default=False), - udp_port=dict(type='int', default=4444), - ) - - def connect_to_influxdb(self): - args = dict( - host=self.hostname, - port=self.port, - username=self.username, - password=self.password, - database=self.database_name, - ssl=self.params['ssl'], - verify_ssl=self.params['validate_certs'], - timeout=self.params['timeout'], - use_udp=self.params['use_udp'], - udp_port=self.params['udp_port'], - proxies=self.params['proxies'], - ) - influxdb_api_version = LooseVersion(influxdb_version) - if influxdb_api_version >= LooseVersion('4.1.0'): - # retries option is added in version 4.1.0 - args.update(retries=self.params['retries']) - - if influxdb_api_version >= LooseVersion('5.1.0'): - # path argument is added in version 5.1.0 - args.update(path=self.path) - - return InfluxDBClient(**args) diff --git a/ansible_collections/community/general/plugins/module_utils/ipa.py b/ansible_collections/community/general/plugins/module_utils/ipa.py deleted file mode 100644 index 3d8c2580..00000000 --- a/ansible_collections/community/general/plugins/module_utils/ipa.py +++ /dev/null @@ -1,214 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c) 2016 Thomas Krahn (@Nosmoht) -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import os -import socket -import uuid - -import re -from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.six import PY3 -from ansible.module_utils.six.moves.urllib.parse import quote -from ansible.module_utils.urls import fetch_url, HAS_GSSAPI -from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound - - -def _env_then_dns_fallback(*args, **kwargs): - ''' Load value from environment or DNS in that order''' - try: - result = env_fallback(*args, **kwargs) - if result == '': - raise AnsibleFallbackNotFound - return result - except AnsibleFallbackNotFound: - # If no host was given, we try to guess it from IPA. - # The ipa-ca entry is a standard entry that IPA will have set for - # the CA. 
- try: - return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0] - except Exception: - raise AnsibleFallbackNotFound - - -class IPAClient(object): - def __init__(self, module, host, port, protocol): - self.host = host - self.port = port - self.protocol = protocol - self.module = module - self.headers = None - self.timeout = module.params.get('ipa_timeout') - self.use_gssapi = False - - def get_base_url(self): - return '%s://%s/ipa' % (self.protocol, self.host) - - def get_json_url(self): - return '%s/session/json' % self.get_base_url() - - def login(self, username, password): - if 'KRB5CCNAME' in os.environ and HAS_GSSAPI: - self.use_gssapi = True - elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI: - ccache = "MEMORY:" + str(uuid.uuid4()) - os.environ['KRB5CCNAME'] = ccache - self.use_gssapi = True - else: - if not password: - if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ: - self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'") - self._fail('login', 'Password is required if not using ' - 'GSSAPI. To use GSSAPI, please set the ' - 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) ' - ' environment variables.') - url = '%s/session/login_password' % self.get_base_url() - data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe='')) - headers = {'referer': self.get_base_url(), - 'Content-Type': 'application/x-www-form-urlencoded', - 'Accept': 'text/plain'} - try: - resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout) - status_code = info['status'] - if status_code not in [200, 201, 204]: - self._fail('login', info['msg']) - - self.headers = {'Cookie': info.get('set-cookie')} - except Exception as e: - self._fail('login', to_native(e)) - if not self.headers: - self.headers = dict() - self.headers.update({ - 'referer': self.get_base_url(), - 'Content-Type': 'application/json', - 'Accept': 'application/json'}) - - def _fail(self, msg, e): - if 'message' in e: - err_string = e.get('message') - else: - err_string = e - self.module.fail_json(msg='%s: %s' % (msg, err_string)) - - def get_ipa_version(self): - response = self.ping()['summary'] - ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*') - version_match = ipa_ver_regex.match(response) - ipa_version = None - if version_match: - ipa_version = version_match.groups()[0] - return ipa_version - - def ping(self): - return self._post_json(method='ping', name=None) - - def _post_json(self, method, name, item=None): - if item is None: - item = {} - url = '%s/session/json' % self.get_base_url() - data = dict(method=method) - - # TODO: We should probably handle this a little better. 
- if method in ('ping', 'config_show', 'otpconfig_show'): - data['params'] = [[], {}] - elif method in ('config_mod', 'otpconfig_mod'): - data['params'] = [[], item] - else: - data['params'] = [[name], item] - - try: - resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)), - headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi) - status_code = info['status'] - if status_code not in [200, 201, 204]: - self._fail(method, info['msg']) - except Exception as e: - self._fail('post %s' % method, to_native(e)) - - if PY3: - charset = resp.headers.get_content_charset('latin-1') - else: - response_charset = resp.headers.getparam('charset') - if response_charset: - charset = response_charset - else: - charset = 'latin-1' - resp = json.loads(to_text(resp.read(), encoding=charset)) - err = resp.get('error') - if err is not None: - self._fail('response %s' % method, err) - - if 'result' in resp: - result = resp.get('result') - if 'result' in result: - result = result.get('result') - if isinstance(result, list): - if len(result) > 0: - return result[0] - else: - return {} - return result - return None - - def get_diff(self, ipa_data, module_data): - result = [] - for key in module_data.keys(): - mod_value = module_data.get(key, None) - if isinstance(mod_value, list): - default = [] - else: - default = None - ipa_value = ipa_data.get(key, default) - if isinstance(ipa_value, list) and not isinstance(mod_value, list): - mod_value = [mod_value] - if isinstance(ipa_value, list) and isinstance(mod_value, list): - mod_value = sorted(mod_value) - ipa_value = sorted(ipa_value) - if mod_value != ipa_value: - result.append(key) - return result - - def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None, append=None): - changed = False - diff = list(set(ipa_list) - set(module_list)) - if append is not True and len(diff) > 0: - changed = True - if not self.module.check_mode: - if item: - remove_method(name=name, item={item: diff}) - else: - remove_method(name=name, item=diff) - - diff = list(set(module_list) - set(ipa_list)) - if len(diff) > 0: - changed = True - if not self.module.check_mode: - if item: - add_method(name=name, item={item: diff}) - else: - add_method(name=name, item=diff) - - return changed - - -def ipa_argument_spec(): - return dict( - ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])), - ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])), - ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])), - ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])), - ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])), - ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])), - validate_certs=dict(type='bool', default=True), - ) diff --git a/ansible_collections/community/general/plugins/module_utils/ldap.py b/ansible_collections/community/general/plugins/module_utils/ldap.py deleted file mode 100644 index 30dbaf76..00000000 --- a/ansible_collections/community/general/plugins/module_utils/ldap.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type - -import traceback -from ansible.module_utils.common.text.converters import to_native - -try: - import ldap - import ldap.sasl - - HAS_LDAP = True - - SASCL_CLASS = { - 'gssapi': ldap.sasl.gssapi, - 'external': ldap.sasl.external, - } -except ImportError: - HAS_LDAP = False - - -def gen_specs(**specs): - specs.update({ - 'bind_dn': dict(), - 'bind_pw': dict(default='', no_log=True), - 'dn': dict(required=True), - 'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']), - 'server_uri': dict(default='ldapi:///'), - 'start_tls': dict(default=False, type='bool'), - 'validate_certs': dict(default=True, type='bool'), - 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'), - }) - - return specs - - -class LdapGeneric(object): - def __init__(self, module): - # Shortcuts - self.module = module - self.bind_dn = self.module.params['bind_dn'] - self.bind_pw = self.module.params['bind_pw'] - self.dn = self.module.params['dn'] - self.referrals_chasing = self.module.params['referrals_chasing'] - self.server_uri = self.module.params['server_uri'] - self.start_tls = self.module.params['start_tls'] - self.verify_cert = self.module.params['validate_certs'] - self.sasl_class = self.module.params['sasl_class'] - - # Establish connection - self.connection = self._connect_to_ldap() - - def fail(self, msg, exn): - self.module.fail_json( - msg=msg, - details=to_native(exn), - exception=traceback.format_exc() - ) - - def _connect_to_ldap(self): - if not self.verify_cert: - ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) - - connection = ldap.initialize(self.server_uri) - - if self.referrals_chasing == 'disabled': - # Switch off chasing of referrals (https://github.com/ansible-collections/community.general/issues/1067) - connection.set_option(ldap.OPT_REFERRALS, 0) - - if self.start_tls: - try: - connection.start_tls_s() - except ldap.LDAPError as e: - self.fail("Cannot start TLS.", e) - - try: - if self.bind_dn is not None: - connection.simple_bind_s(self.bind_dn, self.bind_pw) - else: - klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external) - connection.sasl_interactive_bind_s('', klass()) - except ldap.LDAPError as e: - self.fail("Cannot bind to the server.", e) - - return connection diff --git a/ansible_collections/community/general/plugins/module_utils/linode.py b/ansible_collections/community/general/plugins/module_utils/linode.py deleted file mode 100644 index 9d7c37e6..00000000 --- a/ansible_collections/community/general/plugins/module_utils/linode.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# Copyright (c), Luke Murphy @decentral1se -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -def get_user_agent(module): - """Retrieve a user-agent to send with LinodeClient requests.""" - try: - from ansible.module_utils.ansible_release import __version__ as ansible_version - except ImportError: - ansible_version = 'unknown' - return 'Ansible-%s/%s' % (module, ansible_version) diff --git a/ansible_collections/community/general/plugins/module_utils/lxd.py b/ansible_collections/community/general/plugins/module_utils/lxd.py deleted file mode 100644 index e25caf11..00000000 --- a/ansible_collections/community/general/plugins/module_utils/lxd.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- - -# (c) 2016, Hiroaki Nakamura -# -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import socket -import ssl - -from ansible.module_utils.urls import generic_urlparse -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.module_utils.six.moves import http_client -from ansible.module_utils.common.text.converters import to_text - -# httplib/http.client connection using unix domain socket -HTTPConnection = http_client.HTTPConnection -HTTPSConnection = http_client.HTTPSConnection - -import json - - -class UnixHTTPConnection(HTTPConnection): - def __init__(self, path): - HTTPConnection.__init__(self, 'localhost') - self.path = path - - def connect(self): - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - sock.connect(self.path) - self.sock = sock - - -class LXDClientException(Exception): - def __init__(self, msg, **kwargs): - self.msg = msg - self.kwargs = kwargs - - -class LXDClient(object): - def __init__(self, url, key_file=None, cert_file=None, debug=False): - """LXD Client. - - :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1) - :type url: ``str`` - :param key_file: The path of the client certificate key file. - :type key_file: ``str`` - :param cert_file: The path of the client certificate file. - :type cert_file: ``str`` - :param debug: The debug flag. The request and response are stored in logs when debug is true. 
- :type debug: ``bool`` - """ - self.url = url - self.debug = debug - self.logs = [] - if url.startswith('https:'): - self.cert_file = cert_file - self.key_file = key_file - parts = generic_urlparse(urlparse(self.url)) - ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) - ctx.load_cert_chain(cert_file, keyfile=key_file) - self.connection = HTTPSConnection(parts.get('netloc'), context=ctx) - elif url.startswith('unix:'): - unix_socket_path = url[len('unix:'):] - self.connection = UnixHTTPConnection(unix_socket_path) - else: - raise LXDClientException('URL scheme must be unix: or https:') - - def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None): - resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout) - if resp_json['type'] == 'async': - url = '{0}/wait'.format(resp_json['operation']) - resp_json = self._send_request('GET', url) - if wait_for_container: - while resp_json['metadata']['status'] == 'Running': - resp_json = self._send_request('GET', url) - if resp_json['metadata']['status'] != 'Success': - self._raise_err_from_json(resp_json) - return resp_json - - def authenticate(self, trust_password): - body_json = {'type': 'client', 'password': trust_password} - return self._send_request('POST', '/1.0/certificates', body_json=body_json) - - def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None): - try: - body = json.dumps(body_json) - self.connection.request(method, url, body=body) - resp = self.connection.getresponse() - resp_data = resp.read() - resp_data = to_text(resp_data, errors='surrogate_or_strict') - resp_json = json.loads(resp_data) - self.logs.append({ - 'type': 'sent request', - 'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout}, - 'response': {'json': resp_json} - }) - resp_type = resp_json.get('type', None) - if resp_type == 'error': - if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes: - return resp_json - if resp_json['error'] == "Certificate already in trust store": - return resp_json - self._raise_err_from_json(resp_json) - return resp_json - except socket.error as e: - raise LXDClientException('cannot connect to the LXD server', err=e) - - def _raise_err_from_json(self, resp_json): - err_params = {} - if self.debug: - err_params['logs'] = self.logs - raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params) - - @staticmethod - def _get_err_from_resp_json(resp_json): - err = None - metadata = resp_json.get('metadata', None) - if metadata is not None: - err = metadata.get('err', None) - if err is None: - err = resp_json.get('error', None) - return err diff --git a/ansible_collections/community/general/plugins/module_utils/manageiq.py b/ansible_collections/community/general/plugins/module_utils/manageiq.py deleted file mode 100644 index 98e5590c..00000000 --- a/ansible_collections/community/general/plugins/module_utils/manageiq.py +++ /dev/null @@ -1,157 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2017, Daniel Korn -# -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import traceback - -from ansible.module_utils.basic import missing_required_lib - -CLIENT_IMP_ERR = None -try: - from manageiq_client.api import ManageIQClient - HAS_CLIENT = True -except ImportError: - CLIENT_IMP_ERR = traceback.format_exc() - HAS_CLIENT = False - - -def manageiq_argument_spec(): - options = dict( - url=dict(default=os.environ.get('MIQ_URL', None)), - username=dict(default=os.environ.get('MIQ_USERNAME', None)), - password=dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True), - token=dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True), - validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), - ca_cert=dict(required=False, default=None, aliases=['ca_bundle_path']), - ) - - return dict( - manageiq_connection=dict(type='dict', - apply_defaults=True, - options=options), - ) - - -def check_client(module): - if not HAS_CLIENT: - module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR) - - -def validate_connection_params(module): - params = module.params['manageiq_connection'] - error_str = "missing required argument: manageiq_connection[{}]" - url = params['url'] - token = params['token'] - username = params['username'] - password = params['password'] - - if (url and username and password) or (url and token): - return params - for arg in ['url', 'username', 'password']: - if params[arg] in (None, ''): - module.fail_json(msg=error_str.format(arg)) - - -def manageiq_entities(): - return { - 'provider': 'providers', 'host': 'hosts', 'vm': 'vms', - 'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores', - 'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services', - 'service template': 'service_templates', 'template': 'templates', - 'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints' - } - - -class ManageIQ(object): - """ - class encapsulating ManageIQ API client. - """ - - def __init__(self, module): - # handle import errors - check_client(module) - - params = validate_connection_params(module) - - url = params['url'] - username = params['username'] - password = params['password'] - token = params['token'] - verify_ssl = params['validate_certs'] - ca_bundle_path = params['ca_cert'] - - self._module = module - self._api_url = url + '/api' - self._auth = dict(user=username, password=password, token=token) - try: - self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path) - except Exception as e: - self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e))) - - @property - def module(self): - """ Ansible module module - - Returns: - the ansible module - """ - return self._module - - @property - def api_url(self): - """ Base ManageIQ API - - Returns: - the base ManageIQ API - """ - return self._api_url - - @property - def client(self): - """ ManageIQ client - - Returns: - the ManageIQ client - """ - return self._client - - def find_collection_resource_by(self, collection_name, **params): - """ Searches the collection resource by the collection name and the param passed. - - Returns: - the resource as an object if it exists in manageiq, None otherwise. 
- """ - try: - entity = self.client.collections.__getattribute__(collection_name).get(**params) - except ValueError: - return None - except Exception as e: - self.module.fail_json(msg="failed to find resource {error}".format(error=e)) - return vars(entity) - - def find_collection_resource_or_fail(self, collection_name, **params): - """ Searches the collection resource by the collection name and the param passed. - - Returns: - the resource as an object if it exists in manageiq, Fail otherwise. - """ - resource = self.find_collection_resource_by(collection_name, **params) - if resource: - return resource - else: - msg = "{collection_name} where {params} does not exist in manageiq".format( - collection_name=collection_name, params=str(params)) - self.module.fail_json(msg=msg) diff --git a/ansible_collections/community/general/plugins/module_utils/mh/base.py b/ansible_collections/community/general/plugins/module_utils/mh/base.py deleted file mode 100644 index 90c228b3..00000000 --- a/ansible_collections/community/general/plugins/module_utils/mh/base.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE -from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception - - -class ModuleHelperBase(object): - module = None - ModuleHelperException = _MHE - - def __init__(self, module=None): - self._changed = False - - if module: - self.module = module - - if not isinstance(self.module, AnsibleModule): - self.module = AnsibleModule(**self.module) - - def __init_module__(self): - pass - - def __run__(self): - raise NotImplementedError() - - def __quit_module__(self): - pass - - def __changed__(self): - raise NotImplementedError() - - @property - def changed(self): - try: - return self.__changed__() - except NotImplementedError: - return self._changed - - @changed.setter - def changed(self, value): - self._changed = value - - def has_changed(self): - raise NotImplementedError() - - @property - def output(self): - raise NotImplementedError() - - @module_fails_on_exception - def run(self): - self.__init_module__() - self.__run__() - self.__quit_module__() - output = self.output - if 'failed' not in output: - output['failed'] = False - self.module.exit_json(changed=self.has_changed(), **output) - - @classmethod - def execute(cls, module=None): - cls(module).run() diff --git a/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py b/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py deleted file mode 100644 index 558dcca0..00000000 --- a/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -class ModuleHelperException(Exception): - @staticmethod - def _get_remove(key, kwargs): - if key in kwargs: - result = kwargs[key] - del kwargs[key] - 
return result - return None - - def __init__(self, *args, **kwargs): - self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self) - self.update_output = self._get_remove('update_output', kwargs) or {} - super(ModuleHelperException, self).__init__(*args) diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py deleted file mode 100644 index 1c6c9ae4..00000000 --- a/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import traceback - -from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase -from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception - - -class DependencyCtxMgr(object): - def __init__(self, name, msg=None): - self.name = name - self.msg = msg - self.has_it = False - self.exc_type = None - self.exc_val = None - self.exc_tb = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.has_it = exc_type is None - self.exc_type = exc_type - self.exc_val = exc_val - self.exc_tb = exc_tb - return not self.has_it - - @property - def text(self): - return self.msg or str(self.exc_val) - - -class DependencyMixin(ModuleHelperBase): - _dependencies = [] - - @classmethod - def dependency(cls, name, msg): - cls._dependencies.append(DependencyCtxMgr(name, msg)) - return cls._dependencies[-1] - - def fail_on_missing_deps(self): - for d in self._dependencies: - if not d.has_it: - self.module.fail_json(changed=False, - exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), - msg=d.text, - **self.output) - - @module_fails_on_exception - def run(self): - self.fail_on_missing_deps() - super(DependencyMixin, self).run() diff --git a/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py b/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py deleted file mode 100644 index 65842fd7..00000000 --- a/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -from ansible.module_utils.common.dict_transformations import dict_merge - -from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule -from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin - - -class 
ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase): - _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') - facts_name = None - output_params = () - diff_params = () - change_params = () - facts_params = () - - VarDict = _VD # for backward compatibility, will be deprecated at some point - - def __init__(self, module=None): - super(ModuleHelper, self).__init__(module) - for name, value in self.module.params.items(): - self.vars.set( - name, value, - diff=name in self.diff_params, - output=name in self.output_params, - change=None if not self.change_params else name in self.change_params, - fact=name in self.facts_params, - ) - - self._deprecate_attr( - attr="VarDict", - msg="ModuleHelper.VarDict attribute is deprecated, use VarDict from " - "the ansible_collections.community.general.plugins.module_utils.mh.mixins.vars module instead", - version="6.0.0", - collection_name="community.general", - target=ModuleHelper, - module=self.module) - - def update_output(self, **kwargs): - self.update_vars(meta={"output": True}, **kwargs) - - def update_facts(self, **kwargs): - self.update_vars(meta={"fact": True}, **kwargs) - - def _vars_changed(self): - return any(self.vars.has_changed(v) for v in self.vars.change_vars()) - - def has_changed(self): - return self.changed or self._vars_changed() - - @property - def output(self): - result = dict(self.vars.output()) - if self.facts_name: - facts = self.vars.facts() - if facts is not None: - result['ansible_facts'] = {self.facts_name: facts} - if self.module._diff: - diff = result.get('diff', {}) - vars_diff = self.vars.diff() or {} - result['diff'] = dict_merge(dict(diff), vars_diff) - - for varname in list(result): # iterate over a snapshot: keys are added and removed below - if varname in self._output_conflict_list: - result["_" + varname] = result[varname] - del result[varname] - return result - - -class StateModuleHelper(StateMixin, ModuleHelper): - pass - - -class CmdModuleHelper(CmdMixin, ModuleHelper): - pass - - -class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper): - pass diff --git a/ansible_collections/community/general/plugins/module_utils/module_helper.py b/ansible_collections/community/general/plugins/module_utils/module_helper.py deleted file mode 100644 index a6b35bdd..00000000 --- a/ansible_collections/community/general/plugins/module_utils/module_helper.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( - ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule -) -from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat -from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr -from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException -from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict
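The optional-dependency pattern that the deleted deps.py implements (a context manager that swallows an ImportError and lets the module report it later) is worth keeping in mind, since it recurs throughout these module_utils. A minimal, self-contained sketch of that idea; `DepCtx` and `some_sdk` are hypothetical stand-ins, not names from the collection:

```python
# Sketch of the DependencyCtxMgr idea from the deleted deps.py: attempt an
# import inside a context manager, remember whether it succeeded, and let
# the module report the failure later instead of crashing at import time.
class DepCtx(object):  # hypothetical stand-in for DependencyCtxMgr
    def __init__(self, name, msg=None):
        self.name, self.msg = name, msg
        self.has_it, self.exc_val = False, None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.has_it = exc_type is None   # True only if the import succeeded
        self.exc_val = exc_val
        return not self.has_it           # suppress the ImportError, like the original


with DepCtx("some_sdk", msg="some_sdk is required for this module") as dep:
    import some_sdk  # noqa: F401  (hypothetical dependency)

if not dep.has_it:
    print(dep.msg or str(dep.exc_val))   # a real module would call fail_json() here
```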
diff --git a/ansible_collections/community/general/plugins/module_utils/oneview.py b/ansible_collections/community/general/plugins/module_utils/oneview.py deleted file mode 100644 index 6d786b0b..00000000 --- a/ansible_collections/community/general/plugins/module_utils/oneview.py +++ /dev/null @@ -1,486 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (2016-2017) Hewlett Packard Enterprise Development LP -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import abc -import collections -import json -import os -import traceback - -HPE_ONEVIEW_IMP_ERR = None -try: - from hpOneView.oneview_client import OneViewClient - HAS_HPE_ONEVIEW = True -except ImportError: - HPE_ONEVIEW_IMP_ERR = traceback.format_exc() - HAS_HPE_ONEVIEW = False - -from ansible.module_utils import six -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common._collections_compat import Mapping - - -def transform_list_to_dict(list_): - """ - Transforms a list into a dictionary, putting values as keys. - - :arg list list_: List of values - :return: dict: the dictionary built - """ - - ret = {} - - if not list_: - return ret - - for value in list_: - if isinstance(value, Mapping): - ret.update(value) - else: - ret[to_native(value, errors='surrogate_or_strict')] = True - - return ret - - -def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None): - """ - Merge two lists by the key. It basically: - - 1. Adds the items that are present in updated_list and are absent from original_list. - - 2. Removes items that are absent from updated_list and are present in original_list. - - 3. For all items that are in both lists, overwrites the values from the original item with the updated item. - - :arg list original_list: original list. - :arg list updated_list: list with changes. - :arg str key: unique identifier. - :arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge, - if their values are null. - :return: list: the merged list. - """ - ignore_when_null = [] if ignore_when_null is None else ignore_when_null - - if not original_list: - return updated_list - - items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list]) - - merged_items = collections.OrderedDict() - - for item in updated_list: - item_key = item[key] - if item_key in items_map: - for ignored_key in ignore_when_null: - if ignored_key in item and item[ignored_key] is None: - item.pop(ignored_key) - merged_items[item_key] = items_map[item_key] - merged_items[item_key].update(item) - else: - merged_items[item_key] = item - - return list(merged_items.values()) - - -def _str_sorted(obj): - if isinstance(obj, Mapping): - return json.dumps(obj, sort_keys=True) - else: - return str(obj) - - -def _standardize_value(value): - """ - Convert value to string to enhance the comparison. - - :arg value: Any object type. - - :return: str: Converted value. - """ - if isinstance(value, float) and value.is_integer(): - # Workaround to avoid erroneous comparison between int and float - # Removes zero from integer floats - value = int(value) - - return str(value)
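The three merge rules documented on merge_list_by_key above are easy to misread, so here is a small worked example (assuming the deleted function above is in scope), including the `ignore_when_null` escape hatch:

```python
# Worked example of the merge semantics above; 'id' is the unique key here.
original = [{'id': 1, 'size': 10}, {'id': 2, 'size': 20}]
updated = [{'id': 2, 'size': 25, 'tier': None}, {'id': 3, 'size': 30}]

merged = merge_list_by_key(original, updated, key='id', ignore_when_null=['tier'])

# id 1 is removed (absent from updated_list), id 2 is overwritten except for
# the null 'tier' key, and id 3 is added:
assert merged == [{'id': 2, 'size': 25}, {'id': 3, 'size': 30}]
```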
- """ - if isinstance(value, float) and value.is_integer(): - # Workaround to avoid erroneous comparison between int and float - # Removes zero from integer floats - value = int(value) - - return str(value) - - -class OneViewModuleException(Exception): - """ - OneView base Exception. - - Attributes: - msg (str): Exception message. - oneview_response (dict): OneView rest response. - """ - - def __init__(self, data): - self.msg = None - self.oneview_response = None - - if isinstance(data, six.string_types): - self.msg = data - else: - self.oneview_response = data - - if data and isinstance(data, dict): - self.msg = data.get('message') - - if self.oneview_response: - Exception.__init__(self, self.msg, self.oneview_response) - else: - Exception.__init__(self, self.msg) - - -class OneViewModuleTaskError(OneViewModuleException): - """ - OneView Task Error Exception. - - Attributes: - msg (str): Exception message. - error_code (str): A code which uniquely identifies the specific error. - """ - - def __init__(self, msg, error_code=None): - super(OneViewModuleTaskError, self).__init__(msg) - self.error_code = error_code - - -class OneViewModuleValueError(OneViewModuleException): - """ - OneView Value Error. - The exception is raised when the data contains an inappropriate value. - - Attributes: - msg (str): Exception message. - """ - pass - - -class OneViewModuleResourceNotFound(OneViewModuleException): - """ - OneView Resource Not Found Exception. - The exception is raised when an associated resource was not found. - - Attributes: - msg (str): Exception message. - """ - pass - - -@six.add_metaclass(abc.ABCMeta) -class OneViewModuleBase(object): - MSG_CREATED = 'Resource created successfully.' - MSG_UPDATED = 'Resource updated successfully.' - MSG_DELETED = 'Resource deleted successfully.' - MSG_ALREADY_PRESENT = 'Resource is already present.' - MSG_ALREADY_ABSENT = 'Resource is already absent.' - MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. ' - - ONEVIEW_COMMON_ARGS = dict( - config=dict(type='path'), - hostname=dict(type='str'), - username=dict(type='str'), - password=dict(type='str', no_log=True), - api_version=dict(type='int'), - image_streamer_hostname=dict(type='str') - ) - - ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True)) - - resource_client = None - - def __init__(self, additional_arg_spec=None, validate_etag_support=False, supports_check_mode=False): - """ - OneViewModuleBase constructor. - - :arg dict additional_arg_spec: Additional argument spec definition. - :arg bool validate_etag_support: Enables support to eTag validation. 
- """ - argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support) - - self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode) - - self._check_hpe_oneview_sdk() - self._create_oneview_client() - - self.state = self.module.params.get('state') - self.data = self.module.params.get('data') - - # Preload params for get_all - used by facts - self.facts_params = self.module.params.get('params') or {} - - # Preload options as dict - used by facts - self.options = transform_list_to_dict(self.module.params.get('options')) - - self.validate_etag_support = validate_etag_support - - def _build_argument_spec(self, additional_arg_spec, validate_etag_support): - - merged_arg_spec = dict() - merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS) - - if validate_etag_support: - merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS) - - if additional_arg_spec: - merged_arg_spec.update(additional_arg_spec) - - return merged_arg_spec - - def _check_hpe_oneview_sdk(self): - if not HAS_HPE_ONEVIEW: - self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR) - - def _create_oneview_client(self): - if self.module.params.get('hostname'): - config = dict(ip=self.module.params['hostname'], - credentials=dict(userName=self.module.params['username'], password=self.module.params['password']), - api_version=self.module.params['api_version'], - image_streamer_ip=self.module.params['image_streamer_hostname']) - self.oneview_client = OneViewClient(config) - elif not self.module.params['config']: - self.oneview_client = OneViewClient.from_environment_variables() - else: - self.oneview_client = OneViewClient.from_json_file(self.module.params['config']) - - @abc.abstractmethod - def execute_module(self): - """ - Abstract method, must be implemented by the inheritor. - - This method is called from the run method. It should contains the module logic - - :return: dict: It must return a dictionary with the attributes for the module result, - such as ansible_facts, msg and changed. - """ - pass - - def run(self): - """ - Common implementation of the OneView run modules. - - It calls the inheritor 'execute_module' function and sends the return to the Ansible. - - It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message. - - """ - try: - if self.validate_etag_support: - if not self.module.params.get('validate_etag'): - self.oneview_client.connection.disable_etag_validation() - - result = self.execute_module() - - if "changed" not in result: - result['changed'] = False - - self.module.exit_json(**result) - - except OneViewModuleException as exception: - error_msg = '; '.join(to_native(e) for e in exception.args) - self.module.fail_json(msg=error_msg, exception=traceback.format_exc()) - - def resource_absent(self, resource, method='delete'): - """ - Generic implementation of the absent state for the OneView resources. - - It checks if the resource needs to be removed. - - :arg dict resource: Resource to delete. - :arg str method: Function of the OneView client that will be called for resource deletion. - Usually delete or remove. - :return: A dictionary with the expected arguments for the AnsibleModule.exit_json - """ - if resource: - getattr(self.resource_client, method)(resource) - - return {"changed": True, "msg": self.MSG_DELETED} - else: - return {"changed": False, "msg": self.MSG_ALREADY_ABSENT} - - def get_by_name(self, name): - """ - Generic get by name implementation. 
- - def resource_present(self, resource, fact_name, create_method='create'): - """ - Generic implementation of the present state for the OneView resources. - - It checks if the resource needs to be created or updated. - - :arg dict resource: Resource to create or update. - :arg str fact_name: Name of the fact returned to Ansible. - :arg str create_method: Function of the OneView client that will be called for resource creation. - Usually create or add. - :return: A dictionary with the expected arguments for the AnsibleModule.exit_json - """ - - changed = False - if "newName" in self.data: - self.data["name"] = self.data.pop("newName") - - if not resource: - resource = getattr(self.resource_client, create_method)(self.data) - msg = self.MSG_CREATED - changed = True - - else: - merged_data = resource.copy() - merged_data.update(self.data) - - if self.compare(resource, merged_data): - msg = self.MSG_ALREADY_PRESENT - else: - resource = self.resource_client.update(merged_data) - changed = True - msg = self.MSG_UPDATED - - return dict( - msg=msg, - changed=changed, - ansible_facts={fact_name: resource} - ) - - def resource_scopes_set(self, state, fact_name, scope_uris): - """ - Generic implementation of the scopes update PATCH for the OneView resources. - It checks if the resource needs to be updated with the current scopes. - This method is meant to be run after ensuring the present state. - :arg dict state: Dict containing the data from the last state results in the resource. - It needs to have the 'msg', 'changed', and 'ansible_facts' entries. - :arg str fact_name: Name of the fact returned to Ansible. - :arg list scope_uris: List with all the scope URIs to be added to the resource. - :return: A dictionary with the expected arguments for the AnsibleModule.exit_json - """ - if scope_uris is None: - scope_uris = [] - resource = state['ansible_facts'][fact_name] - operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris) - - if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris): - state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data) - state['changed'] = True - state['msg'] = self.MSG_UPDATED - - return state - - def compare(self, first_resource, second_resource): - """ - Recursively compares two dictionaries for equivalence, ignoring types and element order. - Particularities of the comparison: - - Nonexistent key = None - - These values are considered equal: None, empty, False - - Lists are compared value by value after a sort, if they have the same size. - - Each element is converted to str before the comparison. - :arg dict first_resource: first dictionary - :arg dict second_resource: second dictionary - :return: bool: True when equal, False when different. - """ - resource1 = first_resource - resource2 = second_resource - - debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) - - # The first resource is True / Not Null and the second resource is False / Null - if resource1 and not resource2: - self.module.log("resource1 and not resource2. 
" + debug_resources) - return False - - # Checks all keys in first dict against the second dict - for key in resource1: - if key not in resource2: - if resource1[key] is not None: - # Inexistent key is equivalent to exist with value None - self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) - return False - # If both values are null, empty or False it will be considered equal. - elif not resource1[key] and not resource2[key]: - continue - elif isinstance(resource1[key], Mapping): - # recursive call - if not self.compare(resource1[key], resource2[key]): - self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) - return False - elif isinstance(resource1[key], list): - # change comparison function to compare_list - if not self.compare_list(resource1[key], resource2[key]): - self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) - return False - elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]): - self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) - return False - - # Checks all keys in the second dict, looking for missing elements - for key in resource2.keys(): - if key not in resource1: - if resource2[key] is not None: - # Inexistent key is equivalent to exist with value None - self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) - return False - - return True - - def compare_list(self, first_resource, second_resource): - """ - Recursively compares lists contents equivalence, ignoring types and element orders. - Lists with same size are compared value by value after a sort, - each element is converted to str before the comparison. - :arg list first_resource: first list - :arg list second_resource: second list - :return: True when equal; False when different. - """ - - resource1 = first_resource - resource2 = second_resource - - debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) - - # The second list is null / empty / False - if not resource2: - self.module.log("resource 2 is null. " + debug_resources) - return False - - if len(resource1) != len(resource2): - self.module.log("resources have different length. " + debug_resources) - return False - - resource1 = sorted(resource1, key=_str_sorted) - resource2 = sorted(resource2, key=_str_sorted) - - for i, val in enumerate(resource1): - if isinstance(val, Mapping): - # change comparison function to compare dictionaries - if not self.compare(val, resource2[i]): - self.module.log("resources are different. " + debug_resources) - return False - elif isinstance(val, list): - # recursive call - if not self.compare_list(val, resource2[i]): - self.module.log("lists are different. " + debug_resources) - return False - elif _standardize_value(val) != _standardize_value(resource2[i]): - self.module.log("values are different. 
" + debug_resources) - return False - - # no differences found - return True diff --git a/ansible_collections/community/general/plugins/module_utils/online.py b/ansible_collections/community/general/plugins/module_utils/online.py deleted file mode 100644 index b5acbcc0..00000000 --- a/ansible_collections/community/general/plugins/module_utils/online.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import sys - -from ansible.module_utils.basic import env_fallback -from ansible.module_utils.urls import fetch_url - - -def online_argument_spec(): - return dict( - api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']), - no_log=True, aliases=['oauth_token']), - api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']), - api_timeout=dict(type='int', default=30, aliases=['timeout']), - validate_certs=dict(default=True, type='bool'), - ) - - -class OnlineException(Exception): - - def __init__(self, message): - self.message = message - - -class Response(object): - - def __init__(self, resp, info): - self.body = None - if resp: - self.body = resp.read() - self.info = info - - @property - def json(self): - if not self.body: - if "body" in self.info: - return json.loads(self.info["body"]) - return None - try: - return json.loads(self.body) - except ValueError: - return None - - @property - def status_code(self): - return self.info["status"] - - @property - def ok(self): - return self.status_code in (200, 201, 202, 204) - - -class Online(object): - - def __init__(self, module): - self.module = module - self.headers = { - 'Authorization': "Bearer %s" % self.module.params.get('api_token'), - 'User-Agent': self.get_user_agent_string(module), - 'Content-type': 'application/json', - } - self.name = None - - def get_resources(self): - results = self.get('/%s' % self.name) - if not results.ok: - raise OnlineException('Error fetching {0} ({1}) [{2}: {3}]'.format( - self.name, '%s/%s' % (self.module.params.get('api_url'), self.name), - results.status_code, results.json['message'] - )) - - return results.json - - def _url_builder(self, path): - if path[0] == '/': - path = path[1:] - return '%s/%s' % (self.module.params.get('api_url'), path) - - def send(self, method, path, data=None, headers=None): - url = self._url_builder(path) - data = self.module.jsonify(data) - - if headers is not None: - self.headers.update(headers) - - resp, info = fetch_url( - self.module, url, data=data, headers=self.headers, method=method, - timeout=self.module.params.get('api_timeout') - ) - - # Exceptions in fetch_url may result in a status -1, the ensures a proper error to the user in all cases - if info['status'] == -1: - self.module.fail_json(msg=info['msg']) - - return Response(resp, info) - - @staticmethod - def get_user_agent_string(module): - return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0]) - - def get(self, path, data=None, headers=None): - return self.send('GET', path, data, headers) - - def put(self, path, data=None, headers=None): - return self.send('PUT', path, data, headers) - - def post(self, path, data=None, headers=None): - return self.send('POST', path, data, headers) - - def delete(self, path, data=None, headers=None): - 
diff --git a/ansible_collections/community/general/plugins/module_utils/opennebula.py b/ansible_collections/community/general/plugins/module_utils/opennebula.py deleted file mode 100644 index c896a9c6..00000000 --- a/ansible_collections/community/general/plugins/module_utils/opennebula.py +++ /dev/null @@ -1,313 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 www.privaz.io Valletech AB -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import time -import ssl -from os import environ -from ansible.module_utils.six import string_types -from ansible.module_utils.basic import AnsibleModule - - -HAS_PYONE = True - -try: - from pyone import OneException - from pyone.server import OneServer -except ImportError: - OneException = Exception - HAS_PYONE = False - - -class OpenNebulaModule: - """ - Base class for all OpenNebula Ansible Modules. - This is basically a wrapper around the common arguments, the pyone client and - some utility methods. - """ - - common_args = dict( - api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")), - api_username=dict(type='str', default=environ.get("ONE_USERNAME")), - api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")), - validate_certs=dict(default=True, type='bool'), - wait_timeout=dict(type='int', default=300), - ) - - def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None): - - module_args = OpenNebulaModule.common_args.copy() - module_args.update(argument_spec) - - self.module = AnsibleModule(argument_spec=module_args, - supports_check_mode=supports_check_mode, - mutually_exclusive=mutually_exclusive, - required_one_of=required_one_of, - required_if=required_if) - self.result = dict(changed=False, - original_message='', - message='') - self.one = self.create_one_client() - - self.resolved_parameters = self.resolve_parameters() - - def create_one_client(self): - """ - Creates an XML-RPC client to OpenNebula. - - Returns: the new XML-RPC client. - - """ - - # context required for not validating SSL, old python versions won't validate anyway. 
- if hasattr(ssl, '_create_unverified_context'): - no_ssl_validation_context = ssl._create_unverified_context() - else: - no_ssl_validation_context = None - - # Check if the module can run - if not HAS_PYONE: - self.fail("pyone is required for this module") - - if self.module.params.get("api_url"): - url = self.module.params.get("api_url") - else: - self.fail("Either api_url or the environment variable ONE_URL must be provided") - - if self.module.params.get("api_username"): - username = self.module.params.get("api_username") - else: - self.fail("Either api_username or the environment variable ONE_USERNAME must be provided") - - if self.module.params.get("api_password"): - password = self.module.params.get("api_password") - else: - self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided") - - session = "%s:%s" % (username, password) - - if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ: - return OneServer(url, session=session, context=no_ssl_validation_context) - else: - return OneServer(url, session) - - def close_one_client(self): - """ - Close the pyone session. - """ - self.one.server_close() - - def fail(self, msg): - """ - Utility failure method, will ensure pyone is properly closed before failing. - Args: - msg: human readable failure reason. - """ - if hasattr(self, 'one'): - self.close_one_client() - self.module.fail_json(msg=msg) - - def exit(self): - """ - Utility exit method, will ensure pyone is properly closed before exiting. - - """ - if hasattr(self, 'one'): - self.close_one_client() - self.module.exit_json(**self.result) - - def resolve_parameters(self): - """ - This method resolves parameters provided by a secondary ID to the primary ID. - For example, if cluster_name is present, cluster_id will be introduced by performing - the required resolution. - - Returns: a copy of the parameters that includes the resolved parameters. - - """ - - resolved_params = dict(self.module.params) - - if 'cluster_name' in self.module.params: - clusters = self.one.clusterpool.info() - for cluster in clusters.CLUSTER: - if cluster.NAME == self.module.params.get('cluster_name'): - resolved_params['cluster_id'] = cluster.ID - - return resolved_params - - def is_parameter(self, name): - """ - Utility method to check if a parameter was provided or is resolved. - Args: - name: the parameter to check - """ - if name in self.resolved_parameters: - return self.get_parameter(name) is not None - else: - return False - - def get_parameter(self, name): - """ - Utility method for accessing parameters that includes resolved ID - parameters from provided Name parameters. - """ - return self.resolved_parameters.get(name) - - def get_host_by_name(self, name): - ''' - Returns a host given its name. - Args: - name: the name of the host - - Returns: the host object or None if the host is absent. - - ''' - hosts = self.one.hostpool.info() - for h in hosts.HOST: - if h.NAME == name: - return h - return None - - def get_cluster_by_name(self, name): - """ - Returns a cluster given its name. - Args: - name: the name of the cluster - - Returns: the cluster object or None if the cluster is absent. - """ - - clusters = self.one.clusterpool.info() - for c in clusters.CLUSTER: - if c.NAME == name: - return c - return None - - def get_template_by_name(self, name): - ''' - Returns a template given its name. - Args: - name: the name of the template - - Returns: the template object or None if the template is absent. 
- - ''' - templates = self.one.templatepool.info() - for t in templates.TEMPLATE: - if t.NAME == name: - return t - return None - - def cast_template(self, template): - """ - OpenNebula handles all template elements as strings. - At some point there is a cast being performed on types provided by the user. - This function mimics that transformation so that required template updates are detected properly. - Additionally, an array will be converted to a comma-separated list, - which works for labels and hopefully for something more. - - Args: - template: the template to transform - - Returns: the transformed template with data casts applied. - """ - - # TODO: check formally available data types in templates - # TODO: some arrays might be converted to space separated - - for key in template: - value = template[key] - if isinstance(value, dict): - self.cast_template(template[key]) - elif isinstance(value, list): - template[key] = ', '.join(value) - elif not isinstance(value, string_types): - template[key] = str(value) - - def requires_template_update(self, current, desired): - """ - This function will help decide if a template update is required or not. - If a desired key is missing from the current dictionary, an update is required. - If the intersection of both dictionaries is not deep equal, an update is required. - Args: - current: current template as a dictionary - desired: desired template as a dictionary - - Returns: True if a template update is required - """ - - if not desired: - return False - - self.cast_template(desired) - intersection = dict() - for dkey in desired.keys(): - if dkey in current.keys(): - intersection[dkey] = current[dkey] - else: - return True - return not (desired == intersection) - - def wait_for_state(self, element_name, state, state_name, target_states, - invalid_states=None, transition_states=None, - wait_timeout=None): - """ - Args: - element_name: the name of the object we are waiting for: HOST, VM, etc. - state: lambda that returns the current state, will be queried until target state is reached - state_name: lambda that returns the readable form of a given state - target_states: states expected to be reached - invalid_states: if any of these states is reached, fail - transition_states: when used, these are the valid states during the transition. - wait_timeout: timeout period in seconds. Defaults to the provided parameter. - """ - - if not wait_timeout: - wait_timeout = self.module.params.get("wait_timeout") - - start_time = time.time() - - while (time.time() - start_time) < wait_timeout: - current_state = state() - - if invalid_states and current_state in invalid_states: - self.fail('invalid %s state %s' % (element_name, state_name(current_state))) - - if transition_states: - if current_state not in transition_states: - self.fail('invalid %s transition state %s' % (element_name, state_name(current_state))) - - if current_state in target_states: - return True - - time.sleep(self.one.server_retry_interval()) - - self.fail(msg="Wait timeout has expired!") - - def run_module(self): - """ - Trigger the start of the execution of the module. - Returns: - - """ - try: - self.run(self.one, self.module, self.result) - except OneException as e: - self.fail(msg="OpenNebula Exception: %s" % e) - - def run(self, one, module, result): - """ - To be implemented by subclasses with the actual module actions. - Args: - one: the OpenNebula XMLRPC client - module: the Ansible Module object - result: the Ansible result - """ - raise NotImplementedError("Method requires implementation")
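cast_template's stringification rules are the crux of requires_template_update above, so here is a standalone sketch of the same logic (a plain function, no pyone, Python 3 `str` instead of six's string_types) with a worked example:

```python
# Standalone illustration of the cast semantics above: nested dicts are cast
# recursively, lists become comma-separated strings, and everything else is
# stringified before templates are compared.
def cast_template(template):
    for key, value in template.items():
        if isinstance(value, dict):
            cast_template(value)
        elif isinstance(value, list):
            template[key] = ', '.join(value)
        elif not isinstance(value, str):
            template[key] = str(value)


desired = {'LABELS': ['web', 'prod'], 'CPU': 1, 'NIC': {'MODEL': 'virtio'}}
cast_template(desired)
assert desired == {'LABELS': 'web, prod', 'CPU': '1', 'NIC': {'MODEL': 'virtio'}}
```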
diff --git a/ansible_collections/community/general/plugins/module_utils/proxmox.py b/ansible_collections/community/general/plugins/module_utils/proxmox.py deleted file mode 100644 index 94bd0b79..00000000 --- a/ansible_collections/community/general/plugins/module_utils/proxmox.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2020, Tristan Le Guern -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import atexit -import time -import re -import traceback - -PROXMOXER_IMP_ERR = None -try: - from proxmoxer import ProxmoxAPI - HAS_PROXMOXER = True -except ImportError: - HAS_PROXMOXER = False - PROXMOXER_IMP_ERR = traceback.format_exc() - - -from ansible.module_utils.basic import env_fallback, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -def proxmox_auth_argument_spec(): - return dict( - api_host=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_HOST']) - ), - api_user=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_USER']) - ), - api_password=dict(type='str', - no_log=True, - fallback=(env_fallback, ['PROXMOX_PASSWORD']) - ), - api_token_id=dict(type='str', - no_log=False - ), - api_token_secret=dict(type='str', - no_log=True - ), - validate_certs=dict(type='bool', - default=False - ), - ) - - -def proxmox_to_ansible_bool(value): - '''Convert Proxmox representation of a boolean to be Ansible-friendly''' - return True if value == 1 else False - - -def ansible_to_proxmox_bool(value): - '''Convert Ansible representation of a boolean to be Proxmox-friendly''' - if value is None: - return None - - if not isinstance(value, bool): - raise ValueError("%s must be of type bool not %s" % (value, type(value))) - - return 1 if value else 0 - - -class ProxmoxAnsible(object): - """Base class for Proxmox modules""" - def __init__(self, module): - if not HAS_PROXMOXER: - module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) - - self.module = module - self.proxmox_api = self._connect() - # Test token validity - try: - self.proxmox_api.version.get() - except Exception as e: - module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def _connect(self): - api_host = self.module.params['api_host'] - api_user = self.module.params['api_user'] - api_password = self.module.params['api_password'] - api_token_id = self.module.params['api_token_id'] - api_token_secret = self.module.params['api_token_secret'] - validate_certs = self.module.params['validate_certs'] - - auth_args = {'user': api_user} - if api_password: - auth_args['password'] = api_password - else: - auth_args['token_name'] = api_token_id - auth_args['token_value'] = api_token_secret - - try: - return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) - except Exception as e: - self.module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def version(self): - apireturn = self.proxmox_api.version.get() - return LooseVersion(apireturn['version']) - - def get_node(self, node): - nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node] - return nodes[0] if nodes else None - - def get_nextvmid(self): - vmid = self.proxmox_api.cluster.nextid.get() - return vmid - - def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False): - vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name] - - if not vms: - if ignore_missing: - return None - - self.module.fail_json(msg='No VM with name %s found' % name) - elif len(vms) > 1: - if choose_first_if_multiple: - self.module.deprecate( - 'Multiple VMs with name %s found, choosing the first one. ' % name + - 'This will be an error in the future. To ensure the correct VM is used, ' + - 'also pass the vmid parameter.', - version='5.0.0', collection_name='community.general') - else: - self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name) - - return vms[0] - - def get_vm(self, vmid, ignore_missing=False): - vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] - - if vms: - return vms[0] - else: - if ignore_missing: - return None - - self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
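The 0/1-versus-bool conversion helpers above are easy to get backwards; a quick standalone check, assuming the two functions above are in scope:

```python
# Proxmox represents booleans as 0/1, Ansible as real bools; None passes
# through untouched, and non-bool input to the Proxmox direction is rejected.
assert proxmox_to_ansible_bool(1) is True
assert proxmox_to_ansible_bool(0) is False
assert ansible_to_proxmox_bool(True) == 1
assert ansible_to_proxmox_bool(None) is None
try:
    ansible_to_proxmox_bool("yes")
except ValueError as e:
    print(e)  # "yes must be of type bool not <class 'str'>"
```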
diff --git a/ansible_collections/community/general/plugins/module_utils/rax.py b/ansible_collections/community/general/plugins/module_utils/rax.py deleted file mode 100644 index 84effee9..00000000 --- a/ansible_collections/community/general/plugins/module_utils/rax.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by -# Ansible still belong to the author of the module, and may assign their own -# license to the complete work.
-# -# Copyright (c), Michael DeHaan , 2012-2013 -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import re -from uuid import UUID - -from ansible.module_utils.six import text_type, binary_type - -FINAL_STATUSES = ('ACTIVE', 'ERROR') -VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', - 'error', 'error_deleting') - -CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', - 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN'] -CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', - 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', - 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] - -NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None)) -PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000" -SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111" - - -def rax_slugify(value): - """Prepend a key with rax_ and normalize the key name""" - return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - -def rax_clb_node_to_dict(obj): - """Function to convert a CLB Node object to a dict""" - if not obj: - return {} - node = obj.to_dict() - node['id'] = obj.id - node['weight'] = obj.weight - return node - - -def rax_to_dict(obj, obj_type='standard'): - """Generic function to convert a pyrax object to a dict - - obj_type values: - standard - clb - server - - """ - instance = {} - for key in dir(obj): - value = getattr(obj, key) - if obj_type == 'clb' and key == 'nodes': - instance[key] = [] - for node in value: - instance[key].append(rax_clb_node_to_dict(node)) - elif (isinstance(value, list) and len(value) > 0 and - not isinstance(value[0], NON_CALLABLES)): - instance[key] = [] - for item in value: - instance[key].append(rax_to_dict(item)) - elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')): - if obj_type == 'server': - if key == 'image': - if not value: - instance['rax_boot_source'] = 'volume' - else: - instance['rax_boot_source'] = 'local' - key = rax_slugify(key) - instance[key] = value - - if obj_type == 'server': - for attr in ['id', 'accessIPv4', 'name', 'status']: - instance[attr] = instance.get(rax_slugify(attr)) - - return instance - - -def rax_find_bootable_volume(module, rax_module, server, exit=True): - """Find a server's bootable volume""" - cs = rax_module.cloudservers - cbs = rax_module.cloud_blockstorage - server_id = rax_module.utils.get_id(server) - volumes = cs.volumes.get_server_volumes(server_id) - bootable_volumes = [] - for volume in volumes: - vol = cbs.get(volume) - if module.boolean(vol.bootable): - bootable_volumes.append(vol) - if not bootable_volumes: - if exit: - module.fail_json(msg='No bootable volumes could be found for ' - 'server %s' % server_id) - else: - return False - elif len(bootable_volumes) > 1: - if exit: - module.fail_json(msg='Multiple bootable volumes found for server ' - '%s' % server_id) - else: - return False - - return bootable_volumes[0] - - -def rax_find_image(module, rax_module, image, exit=True): - """Find a server image by ID or name""" - cs = rax_module.cloudservers - try: - UUID(image) - except ValueError: - try: - image = cs.images.find(human_id=image) - except(cs.exceptions.NotFound, - cs.exceptions.NoUniqueMatch): - try: - image = cs.images.find(name=image) - except (cs.exceptions.NotFound, - cs.exceptions.NoUniqueMatch): - if exit: -
module.fail_json(msg='No matching image found (%s)' % - image) - else: - return False - - return rax_module.utils.get_id(image) - - -def rax_find_volume(module, rax_module, name): - """Find a Block storage volume by ID or name""" - cbs = rax_module.cloud_blockstorage - try: - UUID(name) - volume = cbs.get(name) - except ValueError: - try: - volume = cbs.find(name=name) - except rax_module.exc.NotFound: - volume = None - except Exception as e: - module.fail_json(msg='%s' % e) - return volume - - -def rax_find_network(module, rax_module, network): - """Find a cloud network by ID or name""" - cnw = rax_module.cloud_networks - try: - UUID(network) - except ValueError: - if network.lower() == 'public': - return cnw.get_server_networks(PUBLIC_NET_ID) - elif network.lower() == 'private': - return cnw.get_server_networks(SERVICE_NET_ID) - else: - try: - network_obj = cnw.find_network_by_label(network) - except (rax_module.exceptions.NetworkNotFound, - rax_module.exceptions.NetworkLabelNotUnique): - module.fail_json(msg='No matching network found (%s)' % - network) - else: - return cnw.get_server_networks(network_obj) - else: - return cnw.get_server_networks(network) - - -def rax_find_server(module, rax_module, server): - """Find a Cloud Server by ID or name""" - cs = rax_module.cloudservers - try: - UUID(server) - server = cs.servers.get(server) - except ValueError: - servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) - if not servers: - module.fail_json(msg='No Server was matched by name, ' - 'try using the Server ID instead') - if len(servers) > 1: - module.fail_json(msg='Multiple servers matched by name, ' - 'try using the Server ID instead') - - # We made it this far, grab the first and hopefully only server - # in the list - server = servers[0] - return server - - -def rax_find_loadbalancer(module, rax_module, loadbalancer): - """Find a Cloud Load Balancer by ID or name""" - clb = rax_module.cloud_loadbalancers - try: - found = clb.get(loadbalancer) - except Exception: - found = [] - for lb in clb.list(): - if loadbalancer == lb.name: - found.append(lb) - - if not found: - module.fail_json(msg='No loadbalancer was matched') - - if len(found) > 1: - module.fail_json(msg='Multiple loadbalancers matched') - - # We made it this far, grab the first and hopefully only item - # in the list - found = found[0] - - return found - - -def rax_argument_spec(): - """Return standard base dictionary used for the argument_spec - argument in AnsibleModule - - """ - return dict( - api_key=dict(type='str', aliases=['password'], no_log=True), - auth_endpoint=dict(type='str'), - credentials=dict(type='path', aliases=['creds_file']), - env=dict(type='str'), - identity_type=dict(type='str', default='rackspace'), - region=dict(type='str'), - tenant_id=dict(type='str'), - tenant_name=dict(type='str'), - username=dict(type='str'), - validate_certs=dict(type='bool', aliases=['verify_ssl']), - ) - - -def rax_required_together(): - """Return the default list used for the required_together argument to - AnsibleModule""" - return [['api_key', 'username']] - - -def setup_rax_module(module, rax_module, region_required=True): - """Set up pyrax in a standard way for all modules""" - rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version, - rax_module.USER_AGENT) - - api_key = module.params.get('api_key') - auth_endpoint = module.params.get('auth_endpoint') - credentials = module.params.get('credentials') - env = module.params.get('env') - identity_type = module.params.get('identity_type') - region = 
module.params.get('region') - tenant_id = module.params.get('tenant_id') - tenant_name = module.params.get('tenant_name') - username = module.params.get('username') - verify_ssl = module.params.get('validate_certs') - - if env is not None: - rax_module.set_environment(env) - - rax_module.set_setting('identity_type', identity_type) - if verify_ssl is not None: - rax_module.set_setting('verify_ssl', verify_ssl) - if auth_endpoint is not None: - rax_module.set_setting('auth_endpoint', auth_endpoint) - if tenant_id is not None: - rax_module.set_setting('tenant_id', tenant_id) - if tenant_name is not None: - rax_module.set_setting('tenant_name', tenant_name) - - try: - username = username or os.environ.get('RAX_USERNAME') - if not username: - username = rax_module.get_setting('keyring_username') - if username: - api_key = 'USE_KEYRING' - if not api_key: - api_key = os.environ.get('RAX_API_KEY') - credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or - os.environ.get('RAX_CREDS_FILE')) - region = (region or os.environ.get('RAX_REGION') or - rax_module.get_setting('region')) - except KeyError as e: - module.fail_json(msg='Unable to load %s' % e.message) - - try: - if api_key and username: - if api_key == 'USE_KEYRING': - rax_module.keyring_auth(username, region=region) - else: - rax_module.set_credentials(username, api_key=api_key, - region=region) - elif credentials: - credentials = os.path.expanduser(credentials) - rax_module.set_credential_file(credentials, region=region) - else: - raise Exception('No credentials supplied!') - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - - if region_required and region not in rax_module.regions: - module.fail_json(msg='%s is not a valid region, must be one of: %s' % - (region, ','.join(rax_module.regions))) - - return rax_module diff --git a/ansible_collections/community/general/plugins/module_utils/redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/redfish_utils.py deleted file mode 100644 index 378d8fa9..00000000 --- a/ansible_collections/community/general/plugins/module_utils/redfish_utils.py +++ /dev/null @@ -1,2982 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import json -from ansible.module_utils.urls import open_url -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six.moves import http_client -from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError -from ansible.module_utils.six.moves.urllib.parse import urlparse - -GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} -POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', - 'OData-Version': '4.0'} -PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', - 'OData-Version': '4.0'} -DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} - -FAIL_MSG = 'Issuing a data modification command without specifying the '\ - 'ID of the target %(resource)s resource when there is more '\ - 'than one %(resource)s is no longer allowed. Use the `resource_id` '\ - 'option to specify the target %(resource)s ID.' 
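FAIL_MSG above uses %-formatting with the same named placeholder repeated three times; for clarity, this is how the class below expands it (plain Python, copied from the constant above):

```python
# FAIL_MSG expansion as used by the _find_*_resource() helpers below: the
# same named placeholder is substituted at every occurrence.
FAIL_MSG = 'Issuing a data modification command without specifying the '\
           'ID of the target %(resource)s resource when there is more '\
           'than one %(resource)s is no longer allowed. Use the `resource_id` '\
           'option to specify the target %(resource)s ID.'

print(FAIL_MSG % {'resource': 'System'})
# -> "... the target System resource when there is more than one System ..."
```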
- - -class RedfishUtils(object): - - def __init__(self, creds, root_uri, timeout, module, resource_id=None, - data_modification=False, strip_etag_quotes=False): - self.root_uri = root_uri - self.creds = creds - self.timeout = timeout - self.module = module - self.service_root = '/redfish/v1/' - self.resource_id = resource_id - self.data_modification = data_modification - self.strip_etag_quotes = strip_etag_quotes - self._init_session() - - def _auth_params(self, headers): - """ - Return tuple of required authentication params based on the presence - of a token in the self.creds dict. If using a token, set the - X-Auth-Token header in the `headers` param. - - :param headers: dict containing headers to send in request - :return: tuple of username, password and force_basic_auth - """ - if self.creds.get('token'): - username = None - password = None - force_basic_auth = False - headers['X-Auth-Token'] = self.creds['token'] - else: - username = self.creds['user'] - password = self.creds['pswd'] - force_basic_auth = True - return username, password, force_basic_auth - - # The following functions are to send GET/POST/PATCH/DELETE requests - def get_request(self, uri): - req_headers = dict(GET_HEADERS) - username, password, basic_auth = self._auth_params(req_headers) - try: - resp = open_url(uri, method="GET", headers=req_headers, - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - data = json.loads(to_native(resp.read())) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) - except HTTPError as e: - msg = self._get_extended_message(e) - return {'ret': False, - 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'" - % (e.code, uri, msg), - 'status': e.code} - except URLError as e: - return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" - % (uri, e.reason)} - # Almost all errors should be caught above, but just in case - except Exception as e: - return {'ret': False, - 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'data': data, 'headers': headers} - - def post_request(self, uri, pyld): - req_headers = dict(POST_HEADERS) - username, password, basic_auth = self._auth_params(req_headers) - try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="POST", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) - except HTTPError as e: - msg = self._get_extended_message(e) - return {'ret': False, - 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'" - % (e.code, uri, msg), - 'status': e.code} - except URLError as e: - return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" - % (uri, e.reason)} - # Almost all errors should be caught above, but just in case - except Exception as e: - return {'ret': False, - 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'headers': headers, 'resp': resp} - - def patch_request(self, uri, pyld): - req_headers = dict(PATCH_HEADERS) - r = self.get_request(uri) - if r['ret']: - # Get etag from etag header or @odata.etag property - etag = r['headers'].get('etag') - if not etag: - etag = r['data'].get('@odata.etag') - if etag: - if self.strip_etag_quotes: - etag = etag.strip('"') - req_headers['If-Match'] = 
etag - username, password, basic_auth = self._auth_params(req_headers) - try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="PATCH", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - except HTTPError as e: - msg = self._get_extended_message(e) - return {'ret': False, - 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" - % (e.code, uri, msg), - 'status': e.code} - except URLError as e: - return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'" - % (uri, e.reason)} - # Almost all errors should be caught above, but just in case - except Exception as e: - return {'ret': False, - 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'resp': resp} - - def delete_request(self, uri, pyld=None): - req_headers = dict(DELETE_HEADERS) - username, password, basic_auth = self._auth_params(req_headers) - try: - data = json.dumps(pyld) if pyld else None - resp = open_url(uri, data=data, - headers=req_headers, method="DELETE", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - except HTTPError as e: - msg = self._get_extended_message(e) - return {'ret': False, - 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" - % (e.code, uri, msg), - 'status': e.code} - except URLError as e: - return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" - % (uri, e.reason)} - # Almost all errors should be caught above, but just in case - except Exception as e: - return {'ret': False, - 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'resp': resp} - - @staticmethod - def _get_extended_message(error): - """ - Get Redfish ExtendedInfo message from response payload if present - :param error: an HTTPError exception - :type error: HTTPError - :return: the ExtendedInfo message if present, else standard HTTP error - """ - msg = http_client.responses.get(error.code, '') - if error.code >= 400: - try: - body = error.read().decode('utf-8') - data = json.loads(body) - ext_info = data['error']['@Message.ExtendedInfo'] - msg = ext_info[0]['Message'] - except Exception: - pass - return msg - - def _init_session(self): - pass - - def _find_accountservice_resource(self): - response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: - return response - data = response['data'] - if 'AccountService' not in data: - return {'ret': False, 'msg': "AccountService resource not found"} - else: - account_service = data["AccountService"]["@odata.id"] - response = self.get_request(self.root_uri + account_service) - if response['ret'] is False: - return response - data = response['data'] - accounts = data['Accounts']['@odata.id'] - if accounts[-1:] == '/': - accounts = accounts[:-1] - self.accounts_uri = accounts - return {'ret': True} - - def _find_sessionservice_resource(self): - response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: - return response - data = response['data'] - if 'SessionService' not in data: - return {'ret': False, 'msg': "SessionService resource not found"} - else: - session_service = data["SessionService"]["@odata.id"] - response = self.get_request(self.root_uri + session_service) - if response['ret'] is False: - return response - data = 
response['data'] - sessions = data['Sessions']['@odata.id'] - if sessions[-1:] == '/': - sessions = sessions[:-1] - self.sessions_uri = sessions - return {'ret': True} - - def _get_resource_uri_by_id(self, uris, id_prop): - for uri in uris: - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - continue - data = response['data'] - if id_prop == data.get('Id'): - return uri - return None - - def _find_systems_resource(self): - response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: - return response - data = response['data'] - if 'Systems' not in data: - return {'ret': False, 'msg': "Systems resource not found"} - response = self.get_request(self.root_uri + data['Systems']['@odata.id']) - if response['ret'] is False: - return response - self.systems_uris = [ - i['@odata.id'] for i in response['data'].get('Members', [])] - if not self.systems_uris: - return { - 'ret': False, - 'msg': "ComputerSystem's Members array is either empty or missing"} - self.systems_uri = self.systems_uris[0] - if self.data_modification: - if self.resource_id: - self.systems_uri = self._get_resource_uri_by_id(self.systems_uris, - self.resource_id) - if not self.systems_uri: - return { - 'ret': False, - 'msg': "System resource %s not found" % self.resource_id} - elif len(self.systems_uris) > 1: - self.module.fail_json(msg=FAIL_MSG % {'resource': 'System'}) - return {'ret': True} - - def _find_updateservice_resource(self): - response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: - return response - data = response['data'] - if 'UpdateService' not in data: - return {'ret': False, 'msg': "UpdateService resource not found"} - else: - update = data["UpdateService"]["@odata.id"] - self.update_uri = update - response = self.get_request(self.root_uri + update) - if response['ret'] is False: - return response - data = response['data'] - self.firmware_uri = self.software_uri = None - if 'FirmwareInventory' in data: - self.firmware_uri = data['FirmwareInventory'][u'@odata.id'] - if 'SoftwareInventory' in data: - self.software_uri = data['SoftwareInventory'][u'@odata.id'] - return {'ret': True} - - def _find_chassis_resource(self): - response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: - return response - data = response['data'] - if 'Chassis' not in data: - return {'ret': False, 'msg': "Chassis resource not found"} - chassis = data["Chassis"]["@odata.id"] - response = self.get_request(self.root_uri + chassis) - if response['ret'] is False: - return response - self.chassis_uris = [ - i['@odata.id'] for i in response['data'].get('Members', [])] - if not self.chassis_uris: - return {'ret': False, - 'msg': "Chassis Members array is either empty or missing"} - self.chassis_uri = self.chassis_uris[0] - if self.data_modification: - if self.resource_id: - self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris, - self.resource_id) - if not self.chassis_uri: - return { - 'ret': False, - 'msg': "Chassis resource %s not found" % self.resource_id} - elif len(self.chassis_uris) > 1: - self.module.fail_json(msg=FAIL_MSG % {'resource': 'Chassis'}) - return {'ret': True} - - def _find_managers_resource(self): - response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: - return response - data = response['data'] - if 'Managers' not in data: - return {'ret': False, 'msg': "Manager resource not found"} - manager = data["Managers"]["@odata.id"] - response = 
self.get_request(self.root_uri + manager) - if response['ret'] is False: - return response - self.manager_uris = [ - i['@odata.id'] for i in response['data'].get('Members', [])] - if not self.manager_uris: - return {'ret': False, - 'msg': "Managers Members array is either empty or missing"} - self.manager_uri = self.manager_uris[0] - if self.data_modification: - if self.resource_id: - self.manager_uri = self._get_resource_uri_by_id(self.manager_uris, - self.resource_id) - if not self.manager_uri: - return { - 'ret': False, - 'msg': "Manager resource %s not found" % self.resource_id} - elif len(self.manager_uris) > 1: - self.module.fail_json(msg=FAIL_MSG % {'resource': 'Manager'}) - return {'ret': True} - - def _get_all_action_info_values(self, action): - """Retrieve all parameter values for an Action from ActionInfo. - Fall back to AllowableValue annotations if no ActionInfo found. - Return the result in an ActionInfo-like dictionary, keyed - by the name of the parameter. """ - ai = {} - if '@Redfish.ActionInfo' in action: - ai_uri = action['@Redfish.ActionInfo'] - response = self.get_request(self.root_uri + ai_uri) - if response['ret'] is True: - data = response['data'] - if 'Parameters' in data: - params = data['Parameters'] - ai = dict((p['Name'], p) - for p in params if 'Name' in p) - if not ai: - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in action.items() - if k.endswith('@Redfish.AllowableValues')) - return ai - - def _get_allowable_values(self, action, name, default_values=None): - if default_values is None: - default_values = [] - ai = self._get_all_action_info_values(action) - allowable_values = ai.get(name, {}).get('AllowableValues') - # fallback to default values - if allowable_values is None: - allowable_values = default_values - return allowable_values - - def get_logs(self): - log_svcs_uri_list = [] - list_of_logs = [] - properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat', - 'Message', 'MessageId', 'MessageArgs'] - - # Find LogService - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'LogServices' not in data: - return {'ret': False, 'msg': "LogServices resource not found"} - - # Find all entries in LogServices - logs_uri = data["LogServices"]["@odata.id"] - response = self.get_request(self.root_uri + logs_uri) - if response['ret'] is False: - return response - data = response['data'] - for log_svcs_entry in data.get('Members', []): - response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id']) - if response['ret'] is False: - return response - _data = response['data'] - if 'Entries' in _data: - log_svcs_uri_list.append(_data['Entries'][u'@odata.id']) - - # For each entry in LogServices, get log name and all log entries - for log_svcs_uri in log_svcs_uri_list: - logs = {} - list_of_log_entries = [] - response = self.get_request(self.root_uri + log_svcs_uri) - if response['ret'] is False: - return response - data = response['data'] - logs['Description'] = data.get('Description', - 'Collection of log entries') - # Get all log entries for each type of log found - for logEntry in data.get('Members', []): - entry = {} - for prop in properties: - if prop in logEntry: - entry[prop] = logEntry.get(prop) - if entry: - list_of_log_entries.append(entry) - log_name = log_svcs_uri.split('/')[-1] - logs[log_name] = list_of_log_entries - list_of_logs.append(logs) - - # list_of_logs[logs{list_of_log_entries[entry{}]}] - return {'ret': True, 'entries': 
list_of_logs} - - def clear_logs(self): - # Find LogService - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'LogServices' not in data: - return {'ret': False, 'msg': "LogServices resource not found"} - - # Find all entries in LogServices - logs_uri = data["LogServices"]["@odata.id"] - response = self.get_request(self.root_uri + logs_uri) - if response['ret'] is False: - return response - data = response['data'] - - for log_svcs_entry in data[u'Members']: - response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"]) - if response['ret'] is False: - return response - _data = response['data'] - # Check to make sure option is available, otherwise error is ugly - if "Actions" in _data: - if "#LogService.ClearLog" in _data[u"Actions"]: - # capture the POST response so the check below inspects the ClearLog result, not the preceding GET - response = self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {}) - if response['ret'] is False: - return response - return {'ret': True} - - def aggregate(self, func, uri_list, uri_name): - ret = True - entries = [] - for uri in uri_list: - inventory = func(uri) - ret = inventory.pop('ret') and ret - if 'entries' in inventory: - entries.append(({uri_name: uri}, - inventory['entries'])) - return dict(ret=ret, entries=entries) - - def aggregate_chassis(self, func): - return self.aggregate(func, self.chassis_uris, 'chassis_uri') - - def aggregate_managers(self, func): - return self.aggregate(func, self.manager_uris, 'manager_uri') - - def aggregate_systems(self, func): - return self.aggregate(func, self.systems_uris, 'system_uri') - - def get_storage_controller_inventory(self, systems_uri): - result = {} - controller_list = [] - controller_results = [] - # Get these entries, but does not fail if not found - properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers', - 'Location', 'Manufacturer', 'Model', 'Name', 'Id', - 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status'] - key = "StorageControllers" - - # Find Storage service - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - data = response['data'] - - if 'Storage' not in data: - return {'ret': False, 'msg': "Storage resource not found"} - - # Get a list of all storage controllers and build respective URIs - storage_uri = data['Storage']["@odata.id"] - response = self.get_request(self.root_uri + storage_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - # Loop through Members and their StorageControllers - # and gather properties from each StorageController - if data[u'Members']: - for storage_member in data[u'Members']: - storage_member_uri = storage_member[u'@odata.id'] - response = self.get_request(self.root_uri + storage_member_uri) - data = response['data'] - - if key in data: - controller_list = data[key] - for controller in controller_list: - controller_result = {} - for property in properties: - if property in controller: - controller_result[property] = controller[property] - controller_results.append(controller_result) - result['entries'] = controller_results - return result - else: - return {'ret': False, 'msg': "Storage Members array is either empty or missing"} - - def get_multi_storage_controller_inventory(self): - return self.aggregate_systems(self.get_storage_controller_inventory) - - def get_disk_inventory(self, systems_uri): - result = {'entries': []} - controller_list = [] - # Get these entries, but does not fail if not found - properties = ['BlockSizeBytes', 
'CapableSpeedGbs', 'CapacityBytes', - 'EncryptionAbility', 'EncryptionStatus', - 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers', - 'Manufacturer', 'MediaType', 'Model', 'Name', - 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision', - 'RotationSpeedRPM', 'SerialNumber', 'Status'] - - # Find Storage service - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - data = response['data'] - - if 'SimpleStorage' not in data and 'Storage' not in data: - return {'ret': False, 'msg': "SimpleStorage and Storage resource \ - not found"} - - if 'Storage' in data: - # Get a list of all storage controllers and build respective URIs - storage_uri = data[u'Storage'][u'@odata.id'] - response = self.get_request(self.root_uri + storage_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if data[u'Members']: - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) - for c in controller_list: - uri = self.root_uri + c - response = self.get_request(uri) - if response['ret'] is False: - return response - data = response['data'] - controller_name = 'Controller 1' - if 'StorageControllers' in data: - sc = data['StorageControllers'] - if sc: - if 'Name' in sc[0]: - controller_name = sc[0]['Name'] - else: - sc_id = sc[0].get('Id', '1') - controller_name = 'Controller %s' % sc_id - drive_results = [] - if 'Drives' in data: - for device in data[u'Drives']: - disk_uri = self.root_uri + device[u'@odata.id'] - response = self.get_request(disk_uri) - data = response['data'] - - drive_result = {} - for property in properties: - if property in data: - if data[property] is not None: - drive_result[property] = data[property] - drive_results.append(drive_result) - drives = {'Controller': controller_name, - 'Drives': drive_results} - result["entries"].append(drives) - - if 'SimpleStorage' in data: - # Get a list of all storage controllers and build respective URIs - storage_uri = data["SimpleStorage"]["@odata.id"] - response = self.get_request(self.root_uri + storage_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) - - for c in controller_list: - uri = self.root_uri + c - response = self.get_request(uri) - if response['ret'] is False: - return response - data = response['data'] - if 'Name' in data: - controller_name = data['Name'] - else: - sc_id = data.get('Id', '1') - controller_name = 'Controller %s' % sc_id - drive_results = [] - for device in data[u'Devices']: - drive_result = {} - for property in properties: - if property in device: - drive_result[property] = device[property] - drive_results.append(drive_result) - drives = {'Controller': controller_name, - 'Drives': drive_results} - result["entries"].append(drives) - - return result - - def get_multi_disk_inventory(self): - return self.aggregate_systems(self.get_disk_inventory) - - def get_volume_inventory(self, systems_uri): - result = {'entries': []} - controller_list = [] - volume_list = [] - # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes', - 'Capacity', 'CapacityBytes', 'CapacitySources', - 'Encrypted', 'EncryptionTypes', 'Identifiers', - 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities', - 'AllocatedPools', 'Status'] - - # Find Storage service - response = self.get_request(self.root_uri + 
systems_uri) - if response['ret'] is False: - return response - data = response['data'] - - if 'SimpleStorage' not in data and 'Storage' not in data: - return {'ret': False, 'msg': "SimpleStorage and Storage resource \ - not found"} - - if 'Storage' in data: - # Get a list of all storage controllers and build respective URIs - storage_uri = data[u'Storage'][u'@odata.id'] - response = self.get_request(self.root_uri + storage_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if data.get('Members'): - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) - for c in controller_list: - uri = self.root_uri + c - response = self.get_request(uri) - if response['ret'] is False: - return response - data = response['data'] - controller_name = 'Controller 1' - if 'StorageControllers' in data: - sc = data['StorageControllers'] - if sc: - if 'Name' in sc[0]: - controller_name = sc[0]['Name'] - else: - sc_id = sc[0].get('Id', '1') - controller_name = 'Controller %s' % sc_id - volume_results = [] - if 'Volumes' in data: - # Get a list of all volumes and build respective URIs - volumes_uri = data[u'Volumes'][u'@odata.id'] - response = self.get_request(self.root_uri + volumes_uri) - data = response['data'] - - if data.get('Members'): - for volume in data[u'Members']: - volume_list.append(volume[u'@odata.id']) - for v in volume_list: - uri = self.root_uri + v - response = self.get_request(uri) - if response['ret'] is False: - return response - data = response['data'] - - volume_result = {} - for property in properties: - if property in data: - if data[property] is not None: - volume_result[property] = data[property] - - # Get related Drives Id - drive_id_list = [] - if 'Links' in data: - if 'Drives' in data[u'Links']: - for link in data[u'Links'][u'Drives']: - drive_id_link = link[u'@odata.id'] - drive_id = drive_id_link.split("/")[-1] - drive_id_list.append({'Id': drive_id}) - volume_result['Linked_drives'] = drive_id_list - volume_results.append(volume_result) - volumes = {'Controller': controller_name, - 'Volumes': volume_results} - result["entries"].append(volumes) - else: - return {'ret': False, 'msg': "Storage resource not found"} - - return result - - def get_multi_volume_inventory(self): - return self.aggregate_systems(self.get_volume_inventory) - - def manage_indicator_led(self, command): - result = {} - key = 'IndicatorLED' - - payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'} - - result = {} - response = self.get_request(self.root_uri + self.chassis_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - if command in payloads.keys(): - payload = {'IndicatorLED': payloads[command]} - response = self.patch_request(self.root_uri + self.chassis_uri, payload) - if response['ret'] is False: - return response - else: - return {'ret': False, 'msg': 'Invalid command'} - - return result - - def _map_reset_type(self, reset_type, allowable_values): - equiv_types = { - 'On': 'ForceOn', - 'ForceOn': 'On', - 'ForceOff': 'GracefulShutdown', - 'GracefulShutdown': 'ForceOff', - 'GracefulRestart': 'ForceRestart', - 'ForceRestart': 'GracefulRestart' - } - - if reset_type in allowable_values: - return reset_type - if reset_type not in equiv_types: - return reset_type - mapped_type = equiv_types[reset_type] - if mapped_type in allowable_values: - return 
mapped_type - return reset_type - - def manage_system_power(self, command): - return self.manage_power(command, self.systems_uri, - '#ComputerSystem.Reset') - - def manage_manager_power(self, command): - return self.manage_power(command, self.manager_uri, - '#Manager.Reset') - - def manage_power(self, command, resource_uri, action_name): - key = "Actions" - reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', - 'GracefulRestart', 'ForceRestart', 'Nmi', - 'ForceOn', 'PushPowerButton', 'PowerCycle'] - - # command should be PowerOn, PowerForceOff, etc. - if not command.startswith('Power'): - return {'ret': False, 'msg': 'Invalid Command (%s)' % command} - reset_type = command[5:] - - # map Reboot to a ResetType that does a reboot - if reset_type == 'Reboot': - reset_type = 'GracefulRestart' - - if reset_type not in reset_type_values: - return {'ret': False, 'msg': 'Invalid Command (%s)' % command} - - # read the resource and get the current power state - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - data = response['data'] - power_state = data.get('PowerState') - - # if power is already in target state, nothing to do - if power_state == "On" and reset_type in ['On', 'ForceOn']: - return {'ret': True, 'changed': False} - if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']: - return {'ret': True, 'changed': False} - - # get the reset Action and target URI - if key not in data or action_name not in data[key]: - return {'ret': False, 'msg': 'Action %s not found' % action_name} - reset_action = data[key][action_name] - if 'target' not in reset_action: - return {'ret': False, - 'msg': 'target URI missing from Action %s' % action_name} - action_uri = reset_action['target'] - - # get AllowableValues - ai = self._get_all_action_info_values(reset_action) - allowable_values = ai.get('ResetType', {}).get('AllowableValues', []) - - # map ResetType to an allowable value if needed - if reset_type not in allowable_values: - reset_type = self._map_reset_type(reset_type, allowable_values) - - # define payload - payload = {'ResetType': reset_type} - - # POST to Action URI - response = self.post_request(self.root_uri + action_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True} - - def _find_account_uri(self, username=None, acct_id=None): - if not any((username, acct_id)): - return {'ret': False, 'msg': - 'Must provide either account_id or account_username'} - - response = self.get_request(self.root_uri + self.accounts_uri) - if response['ret'] is False: - return response - data = response['data'] - - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] - for uri in uris: - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - continue - data = response['data'] - headers = response['headers'] - if username: - if username == data.get('UserName'): - return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} - if acct_id: - if acct_id == data.get('Id'): - return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} - - return {'ret': False, 'no_match': True, 'msg': - 'No account with the given account_id or account_username found'} - - def _find_empty_account_slot(self): - response = self.get_request(self.root_uri + self.accounts_uri) - if response['ret'] is False: - return response - data = response['data'] - - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] - 
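# A free slot in a fixed-size ManagerAccount collection is recognised below by an
# empty UserName combined with Enabled being false; the first URI is rotated to
# the end of the list first because, as the comment below notes, many services
# reserve the first slot for the built-in administrator account.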
if uris: - # first slot may be reserved, so move to end of list - uris += [uris.pop(0)] - for uri in uris: - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - continue - data = response['data'] - headers = response['headers'] - if data.get('UserName') == "" and not data.get('Enabled', True): - return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} - - return {'ret': False, 'no_match': True, 'msg': - 'No empty account slot found'} - - def list_users(self): - result = {} - # listing all users has always been slower than other operations, why? - user_list = [] - users_results = [] - # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled'] - - response = self.get_request(self.root_uri + self.accounts_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - for users in data.get('Members', []): - user_list.append(users[u'@odata.id']) # user_list[] are URIs - - # for each user, get details - for uri in user_list: - user = {} - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - return response - data = response['data'] - - for property in properties: - if property in data: - user[property] = data[property] - - users_results.append(user) - result["entries"] = users_results - return result - - def add_user_via_patch(self, user): - if user.get('account_id'): - # If Id slot specified, use it - response = self._find_account_uri(acct_id=user.get('account_id')) - else: - # Otherwise find first empty slot - response = self._find_empty_account_slot() - - if not response['ret']: - return response - uri = response['uri'] - payload = {} - if user.get('account_username'): - payload['UserName'] = user.get('account_username') - if user.get('account_password'): - payload['Password'] = user.get('account_password') - if user.get('account_roleid'): - payload['RoleId'] = user.get('account_roleid') - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} - - def add_user(self, user): - if not user.get('account_username'): - return {'ret': False, 'msg': - 'Must provide account_username for AddUser command'} - - response = self._find_account_uri(username=user.get('account_username')) - if response['ret']: - # account_username already exists, nothing to do - return {'ret': True, 'changed': False} - - response = self.get_request(self.root_uri + self.accounts_uri) - if not response['ret']: - return response - headers = response['headers'] - - if 'allow' in headers: - methods = [m.strip() for m in headers.get('allow').split(',')] - if 'POST' not in methods: - # if Allow header present and POST not listed, add via PATCH - return self.add_user_via_patch(user) - - payload = {} - if user.get('account_username'): - payload['UserName'] = user.get('account_username') - if user.get('account_password'): - payload['Password'] = user.get('account_password') - if user.get('account_roleid'): - payload['RoleId'] = user.get('account_roleid') - if user.get('account_id'): - payload['Id'] = user.get('account_id') - - response = self.post_request(self.root_uri + self.accounts_uri, payload) - if not response['ret']: - if response.get('status') == 405: - # if POST returned a 405, try to add via PATCH - return self.add_user_via_patch(user) - else: - return response - return {'ret': True} - - def enable_user(self, user): - response = 
self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: - return response - uri = response['uri'] - data = response['data'] - - if data.get('Enabled', True): - # account already enabled, nothing to do - return {'ret': True, 'changed': False} - - payload = {'Enabled': True} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} - - def delete_user_via_patch(self, user, uri=None, data=None): - if not uri: - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: - return response - uri = response['uri'] - data = response['data'] - - if data and data.get('UserName') == '' and not data.get('Enabled', False): - # account UserName already cleared, nothing to do - return {'ret': True, 'changed': False} - - payload = {'UserName': ''} - if data.get('Enabled', False): - payload['Enabled'] = False - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} - - def delete_user(self, user): - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: - if response.get('no_match'): - # account does not exist, nothing to do - return {'ret': True, 'changed': False} - else: - # some error encountered - return response - - uri = response['uri'] - headers = response['headers'] - data = response['data'] - - if 'allow' in headers: - methods = [m.strip() for m in headers.get('allow').split(',')] - if 'DELETE' not in methods: - # if Allow header present and DELETE not listed, del via PATCH - return self.delete_user_via_patch(user, uri=uri, data=data) - - response = self.delete_request(self.root_uri + uri) - if not response['ret']: - if response.get('status') == 405: - # if DELETE returned a 405, try to delete via PATCH - return self.delete_user_via_patch(user, uri=uri, data=data) - else: - return response - return {'ret': True} - - def disable_user(self, user): - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: - return response - uri = response['uri'] - data = response['data'] - - if not data.get('Enabled'): - # account already disabled, nothing to do - return {'ret': True, 'changed': False} - - payload = {'Enabled': False} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} - - def update_user_role(self, user): - if not user.get('account_roleid'): - return {'ret': False, 'msg': - 'Must provide account_roleid for UpdateUserRole command'} - - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: - return response - uri = response['uri'] - data = response['data'] - - if data.get('RoleId') == user.get('account_roleid'): - # account already has RoleId , nothing to do - return {'ret': True, 'changed': False} - - payload = {'RoleId': user.get('account_roleid')} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} - - def update_user_password(self, user): - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: - return response - uri = response['uri'] - 
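# Only the Password property is sent in the PATCH below; a service that enforces
# a password policy can reject the new value, which surfaces here as ret=False
# from patch_request.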
payload = {'Password': user['account_password']} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} - - def update_user_name(self, user): - if not user.get('account_updatename'): - return {'ret': False, 'msg': - 'Must provide account_updatename for UpdateUserName command'} - - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: - return response - uri = response['uri'] - payload = {'UserName': user['account_updatename']} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} - - def update_accountservice_properties(self, user): - if user.get('account_properties') is None: - return {'ret': False, 'msg': - 'Must provide account_properties for UpdateAccountServiceProperties command'} - account_properties = user.get('account_properties') - - # Find AccountService - response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: - return response - data = response['data'] - if 'AccountService' not in data: - return {'ret': False, 'msg': "AccountService resource not found"} - accountservice_uri = data["AccountService"]["@odata.id"] - - # Check support or not - response = self.get_request(self.root_uri + accountservice_uri) - if response['ret'] is False: - return response - data = response['data'] - for property_name in account_properties.keys(): - if property_name not in data: - return {'ret': False, 'msg': - 'property %s not supported' % property_name} - - # if properties is already matched, nothing to do - need_change = False - for property_name in account_properties.keys(): - if account_properties[property_name] != data[property_name]: - need_change = True - break - - if not need_change: - return {'ret': True, 'changed': False, 'msg': "AccountService properties already set"} - - payload = account_properties - response = self.patch_request(self.root_uri + accountservice_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified AccountService properties"} - - def get_sessions(self): - result = {} - # listing all users has always been slower than other operations, why? 
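# The Sessions collection returns only member URIs, so each session requires its
# own GET below, one round trip per member, which is what makes enumeration slow.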
- session_list = [] - sessions_results = [] - # Get these entries, but does not fail if not found - properties = ['Description', 'Id', 'Name', 'UserName'] - - response = self.get_request(self.root_uri + self.sessions_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - for sessions in data[u'Members']: - session_list.append(sessions[u'@odata.id'])  # session_list[] are URIs - - # for each session, get details - for uri in session_list: - session = {} - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - return response - data = response['data'] - - for property in properties: - if property in data: - session[property] = data[property] - - sessions_results.append(session) - result["entries"] = sessions_results - return result - - def clear_sessions(self): - response = self.get_request(self.root_uri + self.sessions_uri) - if response['ret'] is False: - return response - data = response['data'] - - # if no active sessions, return as success - if data['Members@odata.count'] == 0: - return {'ret': True, 'changed': False, 'msg': "There are no active sessions"} - - # loop to delete every active session - for session in data[u'Members']: - response = self.delete_request(self.root_uri + session[u'@odata.id']) - if response['ret'] is False: - return response - - return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"} - - def create_session(self): - if not self.creds.get('user') or not self.creds.get('pswd'): - return {'ret': False, 'msg': - 'Must provide the username and password parameters for ' - 'the CreateSession command'} - - payload = { - 'UserName': self.creds['user'], - 'Password': self.creds['pswd'] - } - response = self.post_request(self.root_uri + self.sessions_uri, payload) - if response['ret'] is False: - return response - - headers = response['headers'] - if 'x-auth-token' not in headers: - return {'ret': False, 'msg': - 'The service did not return the X-Auth-Token header in ' - 'the response from the Sessions collection POST'} - - if 'location' not in headers: - self.module.warn( - 'The service did not return the Location header for the ' - 'session URL in the response from the Sessions collection ' - 'POST') - session_uri = None - else: - session_uri = urlparse(headers.get('location')).path - - session = dict() - session['token'] = headers.get('x-auth-token') - session['uri'] = session_uri - return {'ret': True, 'changed': True, 'session': session, - 'msg': 'Session created successfully'} - - def delete_session(self, session_uri): - if not session_uri: - return {'ret': False, 'msg': - 'Must provide the session_uri parameter for the ' - 'DeleteSession command'} - - response = self.delete_request(self.root_uri + session_uri) - if response['ret'] is False: - return response - - return {'ret': True, 'changed': True, - 'msg': 'Session deleted successfully'}
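For reference, the token flow that create_session and delete_session drive can be sketched with plain requests against a hypothetical BMC (a sketch only; the address, credentials, and verify=False are assumptions, not part of this module):

import requests

BMC = "https://bmc.example.com"                      # hypothetical service root host
LOGIN = {"UserName": "admin", "Password": "secret"}  # hypothetical credentials

# POST to the Sessions collection; a conforming service answers with an
# X-Auth-Token header and a Location header naming the new session resource.
resp = requests.post(BMC + "/redfish/v1/SessionService/Sessions",
                     json=LOGIN, verify=False, timeout=30)
resp.raise_for_status()
token = resp.headers["X-Auth-Token"]
session_uri = resp.headers.get("Location", "")

# Subsequent requests carry the token instead of repeating Basic auth.
systems = requests.get(BMC + "/redfish/v1/Systems",
                       headers={"X-Auth-Token": token},
                       verify=False, timeout=30).json()

# Logging out is a DELETE on the session resource, as in delete_session above.
if session_uri:
    if not session_uri.startswith("http"):
        session_uri = BMC + session_uri
    requests.delete(session_uri, headers={"X-Auth-Token": token},
                    verify=False, timeout=30)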
'msg': "Actions list is empty."} - else: - return {'ret': "False", 'msg': "Key Actions not found."} - return result - - def _software_inventory(self, uri): - result = {} - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - result['entries'] = [] - for member in data[u'Members']: - uri = self.root_uri + member[u'@odata.id'] - # Get details for each software or firmware member - response = self.get_request(uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - software = {} - # Get these standard properties if present - for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', - 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', - 'ReleaseDate']: - if key in data: - software[key] = data.get(key) - result['entries'].append(software) - return result - - def get_firmware_inventory(self): - if self.firmware_uri is None: - return {'ret': False, 'msg': 'No FirmwareInventory resource found'} - else: - return self._software_inventory(self.firmware_uri) - - def get_software_inventory(self): - if self.software_uri is None: - return {'ret': False, 'msg': 'No SoftwareInventory resource found'} - else: - return self._software_inventory(self.software_uri) - - def simple_update(self, update_opts): - image_uri = update_opts.get('update_image_uri') - protocol = update_opts.get('update_protocol') - targets = update_opts.get('update_targets') - creds = update_opts.get('update_creds') - - if not image_uri: - return {'ret': False, 'msg': - 'Must specify update_image_uri for the SimpleUpdate command'} - - response = self.get_request(self.root_uri + self.update_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'Actions' not in data: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - if '#UpdateService.SimpleUpdate' not in data['Actions']: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - action = data['Actions']['#UpdateService.SimpleUpdate'] - if 'target' not in action: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - update_uri = action['target'] - if protocol: - default_values = ['CIFS', 'FTP', 'SFTP', 'HTTP', 'HTTPS', 'NSF', - 'SCP', 'TFTP', 'OEM', 'NFS'] - allowable_values = self._get_allowable_values(action, - 'TransferProtocol', - default_values) - if protocol not in allowable_values: - return {'ret': False, - 'msg': 'Specified update_protocol (%s) not supported ' - 'by service. Supported protocols: %s' % - (protocol, allowable_values)} - if targets: - allowable_values = self._get_allowable_values(action, 'Targets') - if allowable_values: - for target in targets: - if target not in allowable_values: - return {'ret': False, - 'msg': 'Specified target (%s) not supported ' - 'by service. 
Supported targets: %s' % - (target, allowable_values)} - - payload = { - 'ImageURI': image_uri - } - if protocol: - payload["TransferProtocol"] = protocol - if targets: - payload["Targets"] = targets - if creds: - if creds.get('username'): - payload["Username"] = creds.get('username') - if creds.get('password'): - payload["Password"] = creds.get('password') - response = self.post_request(self.root_uri + update_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "SimpleUpdate requested"} - - def get_bios_attributes(self, systems_uri): - result = {} - bios_attributes = {} - key = "Bios" - - # Search for 'key' entry and extract URI from it - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - bios_uri = data[key]["@odata.id"] - - response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - for attribute in data[u'Attributes'].items(): - bios_attributes[attribute[0]] = attribute[1] - result["entries"] = bios_attributes - return result - - def get_multi_bios_attributes(self): - return self.aggregate_systems(self.get_bios_attributes) - - def _get_boot_options_dict(self, boot): - # Get these entries from BootOption, if present - properties = ['DisplayName', 'BootOptionReference'] - - # Retrieve BootOptions if present - if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']: - boot_options_uri = boot['BootOptions']["@odata.id"] - # Get BootOptions resource - response = self.get_request(self.root_uri + boot_options_uri) - if response['ret'] is False: - return {} - data = response['data'] - - # Retrieve Members array - if 'Members' not in data: - return {} - members = data['Members'] - else: - members = [] - - # Build dict of BootOptions keyed by BootOptionReference - boot_options_dict = {} - for member in members: - if '@odata.id' not in member: - return {} - boot_option_uri = member['@odata.id'] - response = self.get_request(self.root_uri + boot_option_uri) - if response['ret'] is False: - return {} - data = response['data'] - if 'BootOptionReference' not in data: - return {} - boot_option_ref = data['BootOptionReference'] - - # fetch the props to display for this boot device - boot_props = {} - for prop in properties: - if prop in data: - boot_props[prop] = data[prop] - - boot_options_dict[boot_option_ref] = boot_props - - return boot_options_dict - - def get_boot_order(self, systems_uri): - result = {} - - # Retrieve System resource - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - # Confirm needed Boot properties are present - if 'Boot' not in data or 'BootOrder' not in data['Boot']: - return {'ret': False, 'msg': "Key BootOrder not found"} - - boot = data['Boot'] - boot_order = boot['BootOrder'] - boot_options_dict = self._get_boot_options_dict(boot) - - # Build boot device list - boot_device_list = [] - for ref in boot_order: - boot_device_list.append( - boot_options_dict.get(ref, {'BootOptionReference': ref})) - - result["entries"] = boot_device_list - return result - - def get_multi_boot_order(self): - return self.aggregate_systems(self.get_boot_order) - - def get_boot_override(self, systems_uri): - result = {} - - properties = 
["BootSourceOverrideEnabled", "BootSourceOverrideTarget", - "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "BootSourceOverrideTarget@Redfish.AllowableValues"] - - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if 'Boot' not in data: - return {'ret': False, 'msg': "Key Boot not found"} - - boot = data['Boot'] - - boot_overrides = {} - if "BootSourceOverrideEnabled" in boot: - if boot["BootSourceOverrideEnabled"] is not False: - for property in properties: - if property in boot: - if boot[property] is not None: - boot_overrides[property] = boot[property] - else: - return {'ret': False, 'msg': "No boot override is enabled."} - - result['entries'] = boot_overrides - return result - - def get_multi_boot_override(self): - return self.aggregate_systems(self.get_boot_override) - - def set_bios_default_settings(self): - result = {} - key = "Bios" - - # Search for 'key' entry and extract URI from it - response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - bios_uri = data[key]["@odata.id"] - - # Extract proper URI - response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"] - - response = self.post_request(self.root_uri + reset_bios_settings_uri, {}) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"} - - def set_boot_override(self, boot_opts): - result = {} - key = "Boot" - - bootdevice = boot_opts.get('bootdevice') - uefi_target = boot_opts.get('uefi_target') - boot_next = boot_opts.get('boot_next') - override_enabled = boot_opts.get('override_enabled') - boot_override_mode = boot_opts.get('boot_override_mode') - - if not bootdevice and override_enabled != 'Disabled': - return {'ret': False, - 'msg': "bootdevice option required for temporary boot override"} - - # Search for 'key' entry and extract URI from it - response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - boot = data[key] - - if override_enabled != 'Disabled': - annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' - if annotation in boot: - allowable_values = boot[annotation] - if isinstance(allowable_values, list) and bootdevice not in allowable_values: - return {'ret': False, - 'msg': "Boot device %s not in list of allowable values (%s)" % - (bootdevice, allowable_values)} - - # read existing values - cur_enabled = boot.get('BootSourceOverrideEnabled') - target = boot.get('BootSourceOverrideTarget') - cur_uefi_target = boot.get('UefiTargetBootSourceOverride') - cur_boot_next = boot.get('BootNext') - cur_override_mode = boot.get('BootSourceOverrideMode') - - if override_enabled == 'Disabled': - payload = { - 'Boot': { - 'BootSourceOverrideEnabled': override_enabled - } - } - elif bootdevice == 'UefiTarget': - if not uefi_target: - return {'ret': False, - 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"} - if override_enabled == cur_enabled and target == bootdevice and 
uefi_target == cur_uefi_target: - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} - payload = { - 'Boot': { - 'BootSourceOverrideEnabled': override_enabled, - 'BootSourceOverrideTarget': bootdevice, - 'UefiTargetBootSourceOverride': uefi_target - } - } - elif bootdevice == 'UefiBootNext': - if not boot_next: - return {'ret': False, - 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"} - if cur_enabled == override_enabled and target == bootdevice and boot_next == cur_boot_next: - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} - payload = { - 'Boot': { - 'BootSourceOverrideEnabled': override_enabled, - 'BootSourceOverrideTarget': bootdevice, - 'BootNext': boot_next - } - } - else: - if (cur_enabled == override_enabled and target == bootdevice and - (cur_override_mode == boot_override_mode or not boot_override_mode)): - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} - payload = { - 'Boot': { - 'BootSourceOverrideEnabled': override_enabled, - 'BootSourceOverrideTarget': bootdevice - } - } - if boot_override_mode: - payload['Boot']['BootSourceOverrideMode'] = boot_override_mode - - response = self.patch_request(self.root_uri + self.systems_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True} - - def set_bios_attributes(self, attributes): - result = {} - key = "Bios" - - # Search for 'key' entry and extract URI from it - response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - bios_uri = data[key]["@odata.id"] - - # Extract proper URI - response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - # Make a copy of the attributes dict - attrs_to_patch = dict(attributes) - # List to hold attributes not found - attrs_bad = {} - - # Check the attributes - for attr_name, attr_value in attributes.items(): - # Check if attribute exists - if attr_name not in data[u'Attributes']: - # Remove and proceed to next attribute if this isn't valid - attrs_bad.update({attr_name: attr_value}) - del attrs_to_patch[attr_name] - continue - - # If already set to requested value, remove it from PATCH payload - if data[u'Attributes'][attr_name] == attributes[attr_name]: - del attrs_to_patch[attr_name] - - warning = "" - if attrs_bad: - warning = "Incorrect attributes %s" % (attrs_bad) - - # Return success w/ changed=False if no attrs need to be changed - if not attrs_to_patch: - return {'ret': True, 'changed': False, - 'msg': "BIOS attributes already set", - 'warning': warning} - - # Get the SettingsObject URI - set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] - - # Construct payload and issue PATCH command - payload = {"Attributes": attrs_to_patch} - response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "Modified BIOS attributes %s" % (attrs_to_patch), - 'warning': warning} - - def set_boot_order(self, boot_list): - if not boot_list: - return {'ret': False, - 'msg': "boot_order list required for SetBootOrder command"} - - systems_uri = self.systems_uri - response = self.get_request(self.root_uri + 
systems_uri) - if response['ret'] is False: - return response - data = response['data'] - - # Confirm needed Boot properties are present - if 'Boot' not in data or 'BootOrder' not in data['Boot']: - return {'ret': False, 'msg': "Key BootOrder not found"} - - boot = data['Boot'] - boot_order = boot['BootOrder'] - boot_options_dict = self._get_boot_options_dict(boot) - - # validate boot_list against BootOptionReferences if available - if boot_options_dict: - boot_option_references = boot_options_dict.keys() - for ref in boot_list: - if ref not in boot_option_references: - return {'ret': False, - 'msg': "BootOptionReference %s not found in BootOptions" % ref} - - # If requested BootOrder is already set, nothing to do - if boot_order == boot_list: - return {'ret': True, 'changed': False, - 'msg': "BootOrder already set to %s" % boot_list} - - payload = { - 'Boot': { - 'BootOrder': boot_list - } - } - response = self.patch_request(self.root_uri + systems_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "BootOrder set"} - - def set_default_boot_order(self): - systems_uri = self.systems_uri - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - data = response['data'] - - # get the #ComputerSystem.SetDefaultBootOrder Action and target URI - action = '#ComputerSystem.SetDefaultBootOrder' - if 'Actions' not in data or action not in data['Actions']: - return {'ret': False, 'msg': 'Action %s not found' % action} - if 'target' not in data['Actions'][action]: - return {'ret': False, - 'msg': 'target URI missing from Action %s' % action} - action_uri = data['Actions'][action]['target'] - - # POST to Action URI - payload = {} - response = self.post_request(self.root_uri + action_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "BootOrder set to default"} - - def get_chassis_inventory(self): - result = {} - chassis_results = [] - - # Get these entries, but does not fail if not found - properties = ['Name', 'Id', 'ChassisType', 'PartNumber', 'AssetTag', - 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model'] - - # Go through list - for chassis_uri in self.chassis_uris: - response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - chassis_result = {} - for property in properties: - if property in data: - chassis_result[property] = data[property] - chassis_results.append(chassis_result) - - result["entries"] = chassis_results - return result - - def get_fan_inventory(self): - result = {} - fan_results = [] - key = "Thermal" - # Get these entries, but does not fail if not found - properties = ['Name', 'FanName', 'Reading', 'ReadingUnits', 'Status'] - - # Go through list - for chassis_uri in self.chassis_uris: - response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - if key in data: - # match: found an entry for "Thermal" information = fans - thermal_uri = data[key]["@odata.id"] - response = self.get_request(self.root_uri + thermal_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - # Checking if fans are present - if u'Fans' in data: - for device in data[u'Fans']: - fan = {} - for property in properties: - if property in device: - fan[property] = device[property] - 
fan_results.append(fan) - else: - return {'ret': False, 'msg': "No Fans present"} - result["entries"] = fan_results - return result - - def get_chassis_power(self): - result = {} - key = "Power" - - # Get these entries, but does not fail if not found - properties = ['Name', 'PowerAllocatedWatts', - 'PowerAvailableWatts', 'PowerCapacityWatts', - 'PowerConsumedWatts', 'PowerMetrics', - 'PowerRequestedWatts', 'RelatedItem', 'Status'] - - chassis_power_results = [] - # Go through list - for chassis_uri in self.chassis_uris: - chassis_power_result = {} - response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - if key in data: - response = self.get_request(self.root_uri + data[key]['@odata.id']) - data = response['data'] - if 'PowerControl' in data: - if len(data['PowerControl']) > 0: - data = data['PowerControl'][0] - for property in properties: - if property in data: - chassis_power_result[property] = data[property] - else: - return {'ret': False, 'msg': 'Key PowerControl not found.'} - chassis_power_results.append(chassis_power_result) - else: - return {'ret': False, 'msg': 'Key Power not found.'} - - result['entries'] = chassis_power_results - return result - - def get_chassis_thermals(self): - result = {} - sensors = [] - key = "Thermal" - - # Get these entries, but does not fail if not found - properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical', - 'UpperThresholdFatal', 'UpperThresholdNonCritical', - 'LowerThresholdCritical', 'LowerThresholdFatal', - 'LowerThresholdNonCritical', 'MaxReadingRangeTemp', - 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem', - 'SensorNumber', 'Status'] - - # Go through list - for chassis_uri in self.chassis_uris: - response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - if key in data: - thermal_uri = data[key]["@odata.id"] - response = self.get_request(self.root_uri + thermal_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - if "Temperatures" in data: - for sensor in data[u'Temperatures']: - sensor_result = {} - for property in properties: - if property in sensor: - if sensor[property] is not None: - sensor_result[property] = sensor[property] - sensors.append(sensor_result) - - if sensors is None: - return {'ret': False, 'msg': 'Key Temperatures was not found.'} - - result['entries'] = sensors - return result - - def get_cpu_inventory(self, systems_uri): - result = {} - cpu_list = [] - cpu_results = [] - key = "Processors" - # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz', - 'TotalCores', 'TotalThreads', 'Status'] - - # Search for 'key' entry and extract URI from it - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - processors_uri = data[key]["@odata.id"] - - # Get a list of all CPUs and build respective URIs - response = self.get_request(self.root_uri + processors_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - for cpu in data[u'Members']: - cpu_list.append(cpu[u'@odata.id']) - - for c in cpu_list: - cpu = {} - uri = self.root_uri + c - response = self.get_request(uri) - 
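# Each Processor member is fetched with its own GET, so CPU inventory cost
# grows linearly with the number of members, as with sessions and users above.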
if response['ret'] is False: - return response - data = response['data'] - - for property in properties: - if property in data: - cpu[property] = data[property] - - cpu_results.append(cpu) - result["entries"] = cpu_results - return result - - def get_multi_cpu_inventory(self): - return self.aggregate_systems(self.get_cpu_inventory) - - def get_memory_inventory(self, systems_uri): - result = {} - memory_list = [] - memory_results = [] - key = "Memory" - # Get these entries, but does not fail if not found - properties = ['Id', 'SerialNumber', 'MemoryDeviceType', 'PartNumber', - 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name'] - - # Search for 'key' entry and extract URI from it - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - memory_uri = data[key]["@odata.id"] - - # Get a list of all DIMMs and build respective URIs - response = self.get_request(self.root_uri + memory_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - for dimm in data[u'Members']: - memory_list.append(dimm[u'@odata.id']) - - for m in memory_list: - dimm = {} - uri = self.root_uri + m - response = self.get_request(uri) - if response['ret'] is False: - return response - data = response['data'] - - if "Status" in data: - if "State" in data["Status"]: - if data["Status"]["State"] == "Absent": - continue - else: - continue - - for property in properties: - if property in data: - dimm[property] = data[property] - - memory_results.append(dimm) - result["entries"] = memory_results - return result - - def get_multi_memory_inventory(self): - return self.aggregate_systems(self.get_memory_inventory) - - def get_nic(self, resource_uri): - result = {} - properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', - 'NameServers', 'MACAddress', 'PermanentMACAddress', - 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status'] - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - nic = {} - for property in properties: - if property in data: - nic[property] = data[property] - result['entries'] = nic - return(result) - - def get_nic_inventory(self, resource_uri): - result = {} - nic_list = [] - nic_results = [] - key = "EthernetInterfaces" - - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - ethernetinterfaces_uri = data[key]["@odata.id"] - - # Get a list of all network controllers and build respective URIs - response = self.get_request(self.root_uri + ethernetinterfaces_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - for nic in data[u'Members']: - nic_list.append(nic[u'@odata.id']) - - for n in nic_list: - nic = self.get_nic(n) - if nic['ret']: - nic_results.append(nic['entries']) - result["entries"] = nic_results - return result - - def get_multi_nic_inventory(self, resource_type): - ret = True - entries = [] - - # Given resource_type, use the proper URI - if resource_type == 'Systems': - resource_uris = self.systems_uris - elif resource_type == 'Manager': - resource_uris = 
self.manager_uris - - for resource_uri in resource_uris: - inventory = self.get_nic_inventory(resource_uri) - ret = inventory.pop('ret') and ret - if 'entries' in inventory: - entries.append(({'resource_uri': resource_uri}, - inventory['entries'])) - return dict(ret=ret, entries=entries) - - def get_virtualmedia(self, resource_uri): - result = {} - virtualmedia_list = [] - virtualmedia_results = [] - key = "VirtualMedia" - # Get these entries, but does not fail if not found - properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes', - 'Image', 'ImageName', 'Name', 'WriteProtected', - 'TransferMethod', 'TransferProtocolType'] - - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - virtualmedia_uri = data[key]["@odata.id"] - - # Get a list of all virtual media and build respective URIs - response = self.get_request(self.root_uri + virtualmedia_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - for virtualmedia in data[u'Members']: - virtualmedia_list.append(virtualmedia[u'@odata.id']) - - for n in virtualmedia_list: - virtualmedia = {} - uri = self.root_uri + n - response = self.get_request(uri) - if response['ret'] is False: - return response - data = response['data'] - - for property in properties: - if property in data: - virtualmedia[property] = data[property] - - virtualmedia_results.append(virtualmedia) - result["entries"] = virtualmedia_results - return result - - def get_multi_virtualmedia(self): - ret = True - entries = [] - - resource_uris = self.manager_uris - - for resource_uri in resource_uris: - virtualmedia = self.get_virtualmedia(resource_uri) - ret = virtualmedia.pop('ret') and ret - if 'entries' in virtualmedia: - entries.append(({'resource_uri': resource_uri}, - virtualmedia['entries'])) - return dict(ret=ret, entries=entries) - - @staticmethod - def _find_empty_virt_media_slot(resources, media_types, - media_match_strict=True): - for uri, data in resources.items(): - # check MediaTypes - if 'MediaTypes' in data and media_types: - if not set(media_types).intersection(set(data['MediaTypes'])): - continue - else: - if media_match_strict: - continue - # if ejected, 'Inserted' should be False and 'ImageName' cleared - if (not data.get('Inserted', False) and - not data.get('ImageName')): - return uri, data - return None, None - - @staticmethod - def _virt_media_image_inserted(resources, image_url): - for uri, data in resources.items(): - if data.get('Image'): - if urlparse(image_url) == urlparse(data.get('Image')): - if data.get('Inserted', False) and data.get('ImageName'): - return True - return False - - @staticmethod - def _find_virt_media_to_eject(resources, image_url): - matched_uri, matched_data = None, None - for uri, data in resources.items(): - if data.get('Image'): - if urlparse(image_url) == urlparse(data.get('Image')): - matched_uri, matched_data = uri, data - if data.get('Inserted', True) and data.get('ImageName', 'x'): - return uri, data, True - return matched_uri, matched_data, False - - def _read_virt_media_resources(self, uri_list): - resources = {} - headers = {} - for uri in uri_list: - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - continue - resources[uri] = response['data'] - headers[uri] = response['headers'] - return resources, headers - - @staticmethod - def 
_insert_virt_media_payload(options, param_map, data, ai): - payload = { - 'Image': options.get('image_url') - } - for param, option in param_map.items(): - if options.get(option) is not None and param in data: - allowable = ai.get(param, {}).get('AllowableValues', []) - if allowable and options.get(option) not in allowable: - return {'ret': False, - 'msg': "Value '%s' specified for option '%s' not " - "in list of AllowableValues %s" % ( - options.get(option), option, - allowable)} - payload[param] = options.get(option) - return payload - - def virtual_media_insert_via_patch(self, options, param_map, uri, data): - # get AllowableValues - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in data.items() - if k.endswith('@Redfish.AllowableValues')) - # construct payload - payload = self._insert_virt_media_payload(options, param_map, data, ai) - if 'Inserted' not in payload: - payload['Inserted'] = True - # PATCH the resource - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} - - def virtual_media_insert(self, options): - param_map = { - 'Inserted': 'inserted', - 'WriteProtected': 'write_protected', - 'UserName': 'username', - 'Password': 'password', - 'TransferProtocolType': 'transfer_protocol_type', - 'TransferMethod': 'transfer_method' - } - image_url = options.get('image_url') - if not image_url: - return {'ret': False, - 'msg': "image_url option required for VirtualMediaInsert"} - media_types = options.get('media_types') - - # locate and read the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} - virt_media_uri = data["VirtualMedia"]["@odata.id"] - response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: - return response - data = response['data'] - virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) - resources, headers = self._read_virt_media_resources(virt_media_list) - - # see if image already inserted; if so, nothing to do - if self._virt_media_image_inserted(resources, image_url): - return {'ret': True, 'changed': False, - 'msg': "VirtualMedia '%s' already inserted" % image_url} - - # find an empty slot to insert the media - # try first with strict media_type matching - uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=True) - if not uri: - # if not found, try without strict media_type matching - uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=False) - if not uri: - return {'ret': False, - 'msg': "Unable to find an available VirtualMedia resource " - "%s" % ('supporting ' + str(media_types) - if media_types else '')} - - # confirm InsertMedia action found - if ('Actions' not in data or - '#VirtualMedia.InsertMedia' not in data['Actions']): - # try to insert via PATCH if no InsertMedia action found - h = headers[uri] - if 'allow' in h: - methods = [m.strip() for m in h.get('allow').split(',')] - if 'PATCH' not in methods: - # if Allow header present and PATCH missing, return error - return {'ret': False, - 'msg': "%s action not found and PATCH not allowed" - % '#VirtualMedia.InsertMedia'} - return self.virtual_media_insert_via_patch(options, param_map, - uri, data) - - # get 
the action property - action = data['Actions']['#VirtualMedia.InsertMedia'] - if 'target' not in action: - return {'ret': False, - 'msg': "target URI missing from Action " - "#VirtualMedia.InsertMedia"} - action_uri = action['target'] - # get ActionInfo or AllowableValues - ai = self._get_all_action_info_values(action) - # construct payload - payload = self._insert_virt_media_payload(options, param_map, data, ai) - # POST to action - response = self.post_request(self.root_uri + action_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} - - def virtual_media_eject_via_patch(self, uri): - # construct payload - payload = { - 'Inserted': False, - 'Image': None - } - # PATCH resource - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia ejected"} - - def virtual_media_eject(self, options): - image_url = options.get('image_url') - if not image_url: - return {'ret': False, - 'msg': "image_url option required for VirtualMediaEject"} - - # locate and read the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} - virt_media_uri = data["VirtualMedia"]["@odata.id"] - response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: - return response - data = response['data'] - virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) - resources, headers = self._read_virt_media_resources(virt_media_list) - - # find the VirtualMedia resource to eject - uri, data, eject = self._find_virt_media_to_eject(resources, image_url) - if uri and eject: - if ('Actions' not in data or - '#VirtualMedia.EjectMedia' not in data['Actions']): - # try to eject via PATCH if no EjectMedia action found - h = headers[uri] - if 'allow' in h: - methods = [m.strip() for m in h.get('allow').split(',')] - if 'PATCH' not in methods: - # if Allow header present and PATCH missing, return error - return {'ret': False, - 'msg': "%s action not found and PATCH not allowed" - % '#VirtualMedia.EjectMedia'} - return self.virtual_media_eject_via_patch(uri) - else: - # POST to the EjectMedia Action - action = data['Actions']['#VirtualMedia.EjectMedia'] - if 'target' not in action: - return {'ret': False, - 'msg': "target URI property missing from Action " - "#VirtualMedia.EjectMedia"} - action_uri = action['target'] - # empty payload for Eject action - payload = {} - # POST to action - response = self.post_request(self.root_uri + action_uri, - payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia ejected"} - elif uri and not eject: - # already ejected: return success but changed=False - return {'ret': True, 'changed': False, - 'msg': "VirtualMedia image '%s' already ejected" % - image_url} - else: - # return failure (no resources matching image_url found) - return {'ret': False, 'changed': False, - 'msg': "No VirtualMedia resource found with image '%s' " - "inserted" % image_url} - - def get_psu_inventory(self): - result = {} - psu_list = [] - psu_results = [] - key = "PowerSupplies" - # Get these entries, but does not fail if not found - properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 
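# These power-supply attributes are optional in the Redfish schema; keys absent from a PSU entry are skipped below rather than treated as errors.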
'Manufacturer', - 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType', - 'Status'] - - # Get a list of all Chassis and build URIs, then get all PowerSupplies - # from each Power entry in the Chassis - chassis_uri_list = self.chassis_uris - for chassis_uri in chassis_uri_list: - response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: - return response - - result['ret'] = True - data = response['data'] - - if 'Power' in data: - power_uri = data[u'Power'][u'@odata.id'] - else: - continue - - response = self.get_request(self.root_uri + power_uri) - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - psu_list = data[key] - for psu in psu_list: - psu_not_present = False - psu_data = {} - for property in properties: - if property in psu: - if psu[property] is not None: - if property == 'Status': - if 'State' in psu[property]: - if psu[property]['State'] == 'Absent': - psu_not_present = True - psu_data[property] = psu[property] - if psu_not_present: - continue - psu_results.append(psu_data) - - result["entries"] = psu_results - if not result["entries"]: - return {'ret': False, 'msg': "No PowerSupply objects found"} - return result - - def get_multi_psu_inventory(self): - return self.aggregate_systems(self.get_psu_inventory) - - def get_system_inventory(self, systems_uri): - result = {} - inventory = {} - # Get these entries, but does not fail if not found - properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer', - 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', - 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', - 'ProcessorSummary', 'TrustedModules', 'Name', 'Id'] - - response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - for property in properties: - if property in data: - inventory[property] = data[property] - - result["entries"] = inventory - return result - - def get_multi_system_inventory(self): - return self.aggregate_systems(self.get_system_inventory) - - def get_network_protocols(self): - result = {} - service_result = {} - # Find NetworkProtocol - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'NetworkProtocol' not in data: - return {'ret': False, 'msg': "NetworkProtocol resource not found"} - networkprotocol_uri = data["NetworkProtocol"]["@odata.id"] - - response = self.get_request(self.root_uri + networkprotocol_uri) - if response['ret'] is False: - return response - data = response['data'] - protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH', - 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP', - 'RFB'] - for protocol_service in protocol_services: - if protocol_service in data.keys(): - service_result[protocol_service] = data[protocol_service] - - result['ret'] = True - result["entries"] = service_result - return result - - def set_network_protocols(self, manager_services): - # Check input data validity - protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH', - 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP', - 'RFB'] - protocol_state_onlist = ['true', 'True', True, 'on', 1] - protocol_state_offlist = ['false', 'False', False, 'off', 0] - payload = {} - for service_name in manager_services.keys(): - if service_name not in protocol_services: - return {'ret': False, 'msg': "Service name %s is invalid" % 
service_name} - payload[service_name] = {} - for service_property in manager_services[service_name].keys(): - value = manager_services[service_name][service_property] - if service_property in ['ProtocolEnabled', 'protocolenabled']: - if value in protocol_state_onlist: - payload[service_name]['ProtocolEnabled'] = True - elif value in protocol_state_offlist: - payload[service_name]['ProtocolEnabled'] = False - else: - return {'ret': False, 'msg': "Value of property %s is invalid" % service_property} - elif service_property in ['port', 'Port']: - if isinstance(value, int): - payload[service_name]['Port'] = value - elif isinstance(value, str) and value.isdigit(): - payload[service_name]['Port'] = int(value) - else: - return {'ret': False, 'msg': "Value of property %s is invalid" % service_property} - else: - payload[service_name][service_property] = value - - # Find NetworkProtocol - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'NetworkProtocol' not in data: - return {'ret': False, 'msg': "NetworkProtocol resource not found"} - networkprotocol_uri = data["NetworkProtocol"]["@odata.id"] - - # Check service property support or not - response = self.get_request(self.root_uri + networkprotocol_uri) - if response['ret'] is False: - return response - data = response['data'] - for service_name in payload.keys(): - if service_name not in data: - return {'ret': False, 'msg': "%s service not supported" % service_name} - for service_property in payload[service_name].keys(): - if service_property not in data[service_name]: - return {'ret': False, 'msg': "%s property for %s service not supported" % (service_property, service_name)} - - # if the protocol is already set, nothing to do - need_change = False - for service_name in payload.keys(): - for service_property in payload[service_name].keys(): - value = payload[service_name][service_property] - if value != data[service_name][service_property]: - need_change = True - break - - if not need_change: - return {'ret': True, 'changed': False, 'msg': "Manager NetworkProtocol services already set"} - - response = self.patch_request(self.root_uri + networkprotocol_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified Manager NetworkProtocol services"} - - @staticmethod - def to_singular(resource_name): - if resource_name.endswith('ies'): - resource_name = resource_name[:-3] + 'y' - elif resource_name.endswith('s'): - resource_name = resource_name[:-1] - return resource_name - - def get_health_resource(self, subsystem, uri, health, expanded): - status = 'Status' - - if expanded: - d = expanded - else: - r = self.get_request(self.root_uri + uri) - if r.get('ret'): - d = r.get('data') - else: - return - - if 'Members' in d: # collections case - for m in d.get('Members'): - u = m.get('@odata.id') - r = self.get_request(self.root_uri + u) - if r.get('ret'): - p = r.get('data') - if p: - e = {self.to_singular(subsystem.lower()) + '_uri': u, - status: p.get(status, - "Status not available")} - health[subsystem].append(e) - else: # non-collections case - e = {self.to_singular(subsystem.lower()) + '_uri': uri, - status: d.get(status, - "Status not available")} - health[subsystem].append(e) - - def get_health_subsystem(self, subsystem, data, health): - if subsystem in data: - sub = data.get(subsystem) - if isinstance(sub, list): - for r in sub: - if '@odata.id' in r: - uri = r.get('@odata.id') - expanded = None - 
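# A member that carries more than a bare '@odata.id', with a '#' fragment in its URI, is an inline (already expanded) resource; the check below passes it through instead of re-fetching it.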
if '#' in uri and len(r) > 1: - expanded = r - self.get_health_resource(subsystem, uri, health, expanded) - elif isinstance(sub, dict): - if '@odata.id' in sub: - uri = sub.get('@odata.id') - self.get_health_resource(subsystem, uri, health, None) - elif 'Members' in data: - for m in data.get('Members'): - u = m.get('@odata.id') - r = self.get_request(self.root_uri + u) - if r.get('ret'): - d = r.get('data') - self.get_health_subsystem(subsystem, d, health) - - def get_health_report(self, category, uri, subsystems): - result = {} - health = {} - status = 'Status' - - # Get health status of top level resource - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - health[category] = {status: data.get(status, "Status not available")} - - # Get health status of subsystems - for sub in subsystems: - d = None - if sub.startswith('Links.'): # ex: Links.PCIeDevices - sub = sub[len('Links.'):] - d = data.get('Links', {}) - elif '.' in sub: # ex: Thermal.Fans - p, sub = sub.split('.') - u = data.get(p, {}).get('@odata.id') - if u: - r = self.get_request(self.root_uri + u) - if r['ret']: - d = r['data'] - if not d: - continue - else: # ex: Memory - d = data - health[sub] = [] - self.get_health_subsystem(sub, d, health) - if not health[sub]: - del health[sub] - - result["entries"] = health - return result - - def get_system_health_report(self, systems_uri): - subsystems = ['Processors', 'Memory', 'SimpleStorage', 'Storage', - 'EthernetInterfaces', 'NetworkInterfaces.NetworkPorts', - 'NetworkInterfaces.NetworkDeviceFunctions'] - return self.get_health_report('System', systems_uri, subsystems) - - def get_multi_system_health_report(self): - return self.aggregate_systems(self.get_system_health_report) - - def get_chassis_health_report(self, chassis_uri): - subsystems = ['Power.PowerSupplies', 'Thermal.Fans', - 'Links.PCIeDevices'] - return self.get_health_report('Chassis', chassis_uri, subsystems) - - def get_multi_chassis_health_report(self): - return self.aggregate_chassis(self.get_chassis_health_report) - - def get_manager_health_report(self, manager_uri): - subsystems = [] - return self.get_health_report('Manager', manager_uri, subsystems) - - def get_multi_manager_health_report(self): - return self.aggregate_managers(self.get_manager_health_report) - - def set_manager_nic(self, nic_addr, nic_config): - # Get the manager ethernet interface uri - nic_info = self.get_manager_ethernet_uri(nic_addr) - - if nic_info.get('nic_addr') is None: - return nic_info - else: - target_ethernet_uri = nic_info['nic_addr'] - target_ethernet_current_setting = nic_info['ethernet_setting'] - - # Convert input to payload and check validity - payload = {} - for property in nic_config.keys(): - value = nic_config[property] - if property not in target_ethernet_current_setting: - return {'ret': False, 'msg': "Property %s in nic_config is invalid" % property} - if isinstance(value, dict): - if isinstance(target_ethernet_current_setting[property], dict): - payload[property] = value - elif isinstance(target_ethernet_current_setting[property], list): - payload[property] = list() - payload[property].append(value) - else: - return {'ret': False, 'msg': "Value of property %s in nic_config is invalid" % property} - else: - payload[property] = value - - # If no need change, nothing to do. 
If error detected, report it - need_change = False - for property in payload.keys(): - set_value = payload[property] - cur_value = target_ethernet_current_setting[property] - # type is simple(not dict/list) - if not isinstance(set_value, dict) and not isinstance(set_value, list): - if set_value != cur_value: - need_change = True - # type is dict - if isinstance(set_value, dict): - for subprop in payload[property].keys(): - if subprop not in target_ethernet_current_setting[property]: - # Not configured already; need to apply the request - need_change = True - break - sub_set_value = payload[property][subprop] - sub_cur_value = target_ethernet_current_setting[property][subprop] - if sub_set_value != sub_cur_value: - need_change = True - # type is list - if isinstance(set_value, list): - if len(set_value) != len(cur_value): - # if arrays are not the same len, no need to check each element - need_change = True - continue - for i in range(len(set_value)): - for subprop in payload[property][i].keys(): - if subprop not in target_ethernet_current_setting[property][i]: - # Not configured already; need to apply the request - need_change = True - break - sub_set_value = payload[property][i][subprop] - sub_cur_value = target_ethernet_current_setting[property][i][subprop] - if sub_set_value != sub_cur_value: - need_change = True - - if not need_change: - return {'ret': True, 'changed': False, 'msg': "Manager NIC already set"} - - response = self.patch_request(self.root_uri + target_ethernet_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"} - - # A helper function to get the EthernetInterface URI - def get_manager_ethernet_uri(self, nic_addr='null'): - # Get EthernetInterface collection - response = self.get_request(self.root_uri + self.manager_uri) - if not response['ret']: - return response - data = response['data'] - if 'EthernetInterfaces' not in data: - return {'ret': False, 'msg': "EthernetInterfaces resource not found"} - ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"] - response = self.get_request(self.root_uri + ethernetinterfaces_uri) - if not response['ret']: - return response - data = response['data'] - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] - - # Find target EthernetInterface - target_ethernet_uri = None - target_ethernet_current_setting = None - if nic_addr == 'null': - # Find root_uri matched EthernetInterface when nic_addr is not specified - nic_addr = (self.root_uri).split('/')[-1] - nic_addr = nic_addr.split(':')[0] # split port if existing - for uri in uris: - response = self.get_request(self.root_uri + uri) - if not response['ret']: - return response - data = response['data'] - data_string = json.dumps(data) - if nic_addr.lower() in data_string.lower(): - target_ethernet_uri = uri - target_ethernet_current_setting = data - break - - nic_info = {} - nic_info['nic_addr'] = target_ethernet_uri - nic_info['ethernet_setting'] = target_ethernet_current_setting - - if target_ethernet_uri is None: - return {} - else: - return nic_info - - def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None): - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'HostInterfaces' not in data: - return {'ret': False, 'msg': "HostInterfaces resource not found"} - - hostinterfaces_uri = data["HostInterfaces"]["@odata.id"] - response = 
self.get_request(self.root_uri + hostinterfaces_uri) - if response['ret'] is False: - return response - data = response['data'] - uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')] - # Capture list of URIs that match a specified HostInterface resource ID - if hostinterface_id: - matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.split('/')[-1]] - - if hostinterface_id and matching_hostinterface_uris: - hostinterface_uri = list.pop(matching_hostinterface_uris) - elif hostinterface_id and not matching_hostinterface_uris: - return {'ret': False, 'msg': "HostInterface ID %s not present." % hostinterface_id} - elif len(uris) == 1: - hostinterface_uri = list.pop(uris) - else: - return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."} - - response = self.get_request(self.root_uri + hostinterface_uri) - if response['ret'] is False: - return response - current_hostinterface_config = response['data'] - payload = {} - for property in hostinterface_config.keys(): - value = hostinterface_config[property] - if property not in current_hostinterface_config: - return {'ret': False, 'msg': "Property %s in hostinterface_config is invalid" % property} - if isinstance(value, dict): - if isinstance(current_hostinterface_config[property], dict): - payload[property] = value - elif isinstance(current_hostinterface_config[property], list): - payload[property] = list() - payload[property].append(value) - else: - return {'ret': False, 'msg': "Value of property %s in hostinterface_config is invalid" % property} - else: - payload[property] = value - - need_change = False - for property in payload.keys(): - set_value = payload[property] - cur_value = current_hostinterface_config[property] - if not isinstance(set_value, dict) and not isinstance(set_value, list): - if set_value != cur_value: - need_change = True - if isinstance(set_value, dict): - for subprop in payload[property].keys(): - if subprop not in current_hostinterface_config[property]: - need_change = True - break - sub_set_value = payload[property][subprop] - sub_cur_value = current_hostinterface_config[property][subprop] - if sub_set_value != sub_cur_value: - need_change = True - if isinstance(set_value, list): - if len(set_value) != len(cur_value): - need_change = True - continue - for i in range(len(set_value)): - for subprop in payload[property][i].keys(): - if subprop not in current_hostinterface_config[property][i]: - need_change = True - break - sub_set_value = payload[property][i][subprop] - sub_cur_value = current_hostinterface_config[property][i][subprop] - if sub_set_value != sub_cur_value: - need_change = True - if not need_change: - return {'ret': True, 'changed': False, 'msg': "Host Interface already configured"} - - response = self.patch_request(self.root_uri + hostinterface_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified Host Interface"} - - def get_hostinterfaces(self): - result = {} - hostinterface_results = [] - properties = ['Id', 'Name', 'Description', 'HostInterfaceType', 'Status', - 'InterfaceEnabled', 'ExternallyAccessible', 'AuthenticationModes', - 'AuthNoneRoleId', 'CredentialBootstrapping'] - manager_uri_list = self.manager_uris - for manager_uri in manager_uri_list: - response = self.get_request(self.root_uri + manager_uri) - if response['ret'] is False: - return response - - result['ret'] = True - data = response['data'] - - if 'HostInterfaces' in data: - 
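# Only managers exposing a HostInterfaces collection contribute entries; the rest are skipped by the else/continue below.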
hostinterfaces_uri = data[u'HostInterfaces'][u'@odata.id'] - else: - continue - - response = self.get_request(self.root_uri + hostinterfaces_uri) - data = response['data'] - - if 'Members' in data: - for hostinterface in data['Members']: - hostinterface_uri = hostinterface['@odata.id'] - hostinterface_response = self.get_request(self.root_uri + hostinterface_uri) - # dictionary for capturing individual HostInterface properties - hostinterface_data_temp = {} - if hostinterface_response['ret'] is False: - return hostinterface_response - hostinterface_data = hostinterface_response['data'] - for property in properties: - if property in hostinterface_data: - if hostinterface_data[property] is not None: - hostinterface_data_temp[property] = hostinterface_data[property] - # Check for the presence of a ManagerEthernetInterface - # object, a link to a _single_ EthernetInterface that the - # BMC uses to communicate with the host. - if 'ManagerEthernetInterface' in hostinterface_data: - interface_uri = hostinterface_data['ManagerEthernetInterface']['@odata.id'] - interface_response = self.get_nic(interface_uri) - if interface_response['ret'] is False: - return interface_response - hostinterface_data_temp['ManagerEthernetInterface'] = interface_response['entries'] - - # Check for the presence of a HostEthernetInterfaces - # object, a link to a _collection_ of EthernetInterfaces - # that the host uses to communicate with the BMC. - if 'HostEthernetInterfaces' in hostinterface_data: - interfaces_uri = hostinterface_data['HostEthernetInterfaces']['@odata.id'] - interfaces_response = self.get_request(self.root_uri + interfaces_uri) - if interfaces_response['ret'] is False: - return interfaces_response - interfaces_data = interfaces_response['data'] - if 'Members' in interfaces_data: - for interface in interfaces_data['Members']: - interface_uri = interface['@odata.id'] - interface_response = self.get_nic(interface_uri) - if interface_response['ret'] is False: - return interface_response - # Check if this is the first - # HostEthernetInterfaces item and create empty - # list if so. 
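# (As an aside: hostinterface_data_temp.setdefault('HostEthernetInterfaces', []).append(interface_response['entries']) would fold the check-and-create below into a single call.)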
- if 'HostEthernetInterfaces' not in hostinterface_data_temp: - hostinterface_data_temp['HostEthernetInterfaces'] = [] - - hostinterface_data_temp['HostEthernetInterfaces'].append(interface_response['entries']) - - hostinterface_results.append(hostinterface_data_temp) - else: - continue - result["entries"] = hostinterface_results - if not result["entries"]: - return {'ret': False, 'msg': "No HostInterface objects found"} - return result diff --git a/ansible_collections/community/general/plugins/module_utils/redis.py b/ansible_collections/community/general/plugins/module_utils/redis.py deleted file mode 100644 index de5c8c7f..00000000 --- a/ansible_collections/community/general/plugins/module_utils/redis.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -from ansible.module_utils.basic import missing_required_lib -__metaclass__ = type - -import traceback - -REDIS_IMP_ERR = None -try: - from redis import Redis - from redis import __version__ as redis_version - HAS_REDIS_PACKAGE = True -except ImportError: - REDIS_IMP_ERR = traceback.format_exc() - HAS_REDIS_PACKAGE = False - -try: - import certifi - HAS_CERTIFI_PACKAGE = True -except ImportError: - CERTIFI_IMPORT_ERROR = traceback.format_exc() - HAS_CERTIFI_PACKAGE = False - - -def fail_imports(module, needs_certifi=True): - errors = [] - traceback = [] - if not HAS_REDIS_PACKAGE: - errors.append(missing_required_lib('redis')) - traceback.append(REDIS_IMP_ERR) - if not HAS_CERTIFI_PACKAGE and needs_certifi: - errors.append(missing_required_lib('certifi')) - traceback.append(CERTIFI_IMPORT_ERROR) - if errors: - module.fail_json(errors=errors, traceback='\n'.join(traceback)) - - -def redis_auth_argument_spec(tls_default=True): - return dict( - login_host=dict(type='str', - default='localhost',), - login_user=dict(type='str'), - login_password=dict(type='str', - no_log=True - ), - login_port=dict(type='int', default=6379), - tls=dict(type='bool', - default=tls_default), - validate_certs=dict(type='bool', - default=True - ), - ca_certs=dict(type='str') - ) - - -def redis_auth_params(module): - login_host = module.params['login_host'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_port = module.params['login_port'] - tls = module.params['tls'] - validate_certs = 'required' if module.params['validate_certs'] else None - ca_certs = module.params['ca_certs'] - if tls and ca_certs is None: - ca_certs = str(certifi.where()) - if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: - module.fail_json( - msg='The option `username` in only supported with redis >= 3.4.0.') - params = {'host': login_host, - 'port': login_port, - 'password': login_password, - 'ssl_ca_certs': ca_certs, - 'ssl_cert_reqs': validate_certs, - 'ssl': tls} - if login_user is not None: - params['username'] = login_user - return params - - -class RedisAnsible(object): - '''Base class for Redis module''' - - def __init__(self, module): - self.module = module - self.connection = self._connect() - - def _connect(self): - try: - return Redis(**redis_auth_params(self.module)) - except Exception as e: - self.module.fail_json(msg='{0}'.format(str(e))) - return None diff --git a/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py 
b/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py deleted file mode 100644 index 07092b96..00000000 --- a/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by -# Ansible still belong to the author of the module, and may assign their -# own license to the complete work. -# -# Copyright (C) 2017 Lenovo, Inc. -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -# -# Contains LXCA common class -# Lenovo xClarity Administrator (LXCA) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import traceback -try: - from pylxca import connect, disconnect - HAS_PYLXCA = True -except ImportError: - HAS_PYLXCA = False - - -PYLXCA_REQUIRED = "Lenovo xClarity Administrator Python Client (Python package 'pylxca') is required for this module." - - -def has_pylxca(module): - """ - Check pylxca is installed - :param module: - """ - if not HAS_PYLXCA: - module.fail_json(msg=PYLXCA_REQUIRED) - - -LXCA_COMMON_ARGS = dict( - login_user=dict(required=True), - login_password=dict(required=True, no_log=True), - auth_url=dict(required=True), -) - - -class connection_object: - def __init__(self, module): - self.module = module - - def __enter__(self): - return setup_conn(self.module) - - def __exit__(self, type, value, traceback): - close_conn() - - -def setup_conn(module): - """ - this function create connection to LXCA - :param module: - :return: lxca connection - """ - lxca_con = None - try: - lxca_con = connect(module.params['auth_url'], - module.params['login_user'], - module.params['login_password'], - "True") - except Exception as exception: - error_msg = '; '.join(exception.args) - module.fail_json(msg=error_msg, exception=traceback.format_exc()) - return lxca_con - - -def close_conn(): - """ - this function close connection to LXCA - :param module: - :return: None - """ - disconnect() diff --git a/ansible_collections/community/general/plugins/module_utils/rundeck.py b/ansible_collections/community/general/plugins/module_utils/rundeck.py deleted file mode 100644 index afbbb481..00000000 --- a/ansible_collections/community/general/plugins/module_utils/rundeck.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Phillipe Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import json - -from ansible.module_utils.urls import fetch_url, url_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -def api_argument_spec(): - ''' - Creates an argument spec that can be used with any module - that will be requesting content via Rundeck API - ''' - api_argument_spec = url_argument_spec() - api_argument_spec.update(dict( - url=dict(required=True, type="str"), - api_version=dict(type="int", default=39), - api_token=dict(required=True, type="str", no_log=True) - )) - - return api_argument_spec - - -def api_request(module, endpoint, data=None, method="GET"): - """Manages Rundeck API requests via HTTP(S) - - :arg module: The AnsibleModule (used to get url, api_version, api_token, 
etc). - :arg endpoint: The API endpoint to be used. - :kwarg data: The data to be sent (in case of POST/PUT). - :kwarg method: "POST", "PUT", etc. - - :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data. - The **info** contains the 'status' and other meta data. When a HttpError (status >= 400) - occurred then ``info['body']`` contains the error response data:: - - Example:: - - data={...} - resp, info = fetch_url(module, - "http://rundeck.example.org", - data=module.jsonify(data), - method="POST") - status_code = info["status"] - body = resp.read() - if status_code >= 400 : - body = info['body'] - """ - - response, info = fetch_url( - module=module, - url="%s/api/%s/%s" % ( - module.params["url"], - module.params["api_version"], - endpoint - ), - data=json.dumps(data), - method=method, - headers={ - "Content-Type": "application/json", - "Accept": "application/json", - "X-Rundeck-Auth-Token": module.params["api_token"] - } - ) - - if info["status"] == 403: - module.fail_json(msg="Token authorization failed", - execution_info=json.loads(info["body"])) - if info["status"] == 409: - module.fail_json(msg="Job executions limit reached", - execution_info=json.loads(info["body"])) - elif info["status"] >= 500: - module.fail_json(msg="Rundeck API error", - execution_info=json.loads(info["body"])) - - try: - content = response.read() - json_response = json.loads(content) - return json_response, info - except AttributeError as error: - module.fail_json(msg="Rundeck API request error", - exception=to_native(error), - execution_info=info) - except ValueError as error: - module.fail_json( - msg="No valid JSON response", - exception=to_native(error), - execution_info=content - ) diff --git a/ansible_collections/community/general/plugins/module_utils/scaleway.py b/ansible_collections/community/general/plugins/module_utils/scaleway.py deleted file mode 100644 index e6fb8109..00000000 --- a/ansible_collections/community/general/plugins/module_utils/scaleway.py +++ /dev/null @@ -1,240 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import re -import sys - -from ansible.module_utils.basic import env_fallback -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode - - -def scaleway_argument_spec(): - return dict( - api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']), - no_log=True, aliases=['oauth_token']), - api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']), - api_timeout=dict(type='int', default=30, aliases=['timeout']), - query_parameters=dict(type='dict', default={}), - validate_certs=dict(default=True, type='bool'), - ) - - -def payload_from_object(scw_object): - return dict( - (k, v) - for k, v in scw_object.items() - if k != 'id' and v is not None - ) - - -class ScalewayException(Exception): - - def __init__(self, message): - self.message = message - - -# Specify a complete Link header, for validation purposes -R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)" - (,<[^>]+>;\srel="(first|previous|next|last)")*''' -# Specify a single relation, for iteration and string extraction purposes -R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"' - - -def parse_pagination_link(header): - if not re.match(R_LINK_HEADER, header, re.VERBOSE): - raise ScalewayException('Scaleway API 
answered with an invalid Link pagination header') - else: - relations = header.split(',') - parsed_relations = {} - rc_relation = re.compile(R_RELATION) - for relation in relations: - match = rc_relation.match(relation) - if not match: - raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header') - data = match.groupdict() - parsed_relations[data['relation']] = data['target_IRI'] - return parsed_relations - - -class Response(object): - - def __init__(self, resp, info): - self.body = None - if resp: - self.body = resp.read() - self.info = info - - @property - def json(self): - if not self.body: - if "body" in self.info: - return json.loads(self.info["body"]) - return None - try: - return json.loads(self.body) - except ValueError: - return None - - @property - def status_code(self): - return self.info["status"] - - @property - def ok(self): - return self.status_code in (200, 201, 202, 204) - - -class Scaleway(object): - - def __init__(self, module): - self.module = module - self.headers = { - 'X-Auth-Token': self.module.params.get('api_token'), - 'User-Agent': self.get_user_agent_string(module), - 'Content-Type': 'application/json', - } - self.name = None - - def get_resources(self): - results = self.get('/%s' % self.name) - - if not results.ok: - raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format( - self.name, '%s/%s' % (self.module.params.get('api_url'), self.name), - results.status_code, results.json['message'] - )) - - return results.json.get(self.name) - - def _url_builder(self, path, params): - d = self.module.params.get('query_parameters') - if params is not None: - d.update(params) - query_string = urlencode(d, doseq=True) - - if path[0] == '/': - path = path[1:] - return '%s/%s?%s' % (self.module.params.get('api_url'), path, query_string) - - def send(self, method, path, data=None, headers=None, params=None): - url = self._url_builder(path=path, params=params) - self.warn(url) - - if headers is not None: - self.headers.update(headers) - - if self.headers['Content-Type'] == "application/json": - data = self.module.jsonify(data) - - resp, info = fetch_url( - self.module, url, data=data, headers=self.headers, method=method, - timeout=self.module.params.get('api_timeout') - ) - - # Exceptions in fetch_url may result in a status -1, the ensures a proper error to the user in all cases - if info['status'] == -1: - self.module.fail_json(msg=info['msg']) - - return Response(resp, info) - - @staticmethod - def get_user_agent_string(module): - return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0]) - - def get(self, path, data=None, headers=None, params=None): - return self.send(method='GET', path=path, data=data, headers=headers, params=params) - - def put(self, path, data=None, headers=None, params=None): - return self.send(method='PUT', path=path, data=data, headers=headers, params=params) - - def post(self, path, data=None, headers=None, params=None): - return self.send(method='POST', path=path, data=data, headers=headers, params=params) - - def delete(self, path, data=None, headers=None, params=None): - return self.send(method='DELETE', path=path, data=data, headers=headers, params=params) - - def patch(self, path, data=None, headers=None, params=None): - return self.send(method="PATCH", path=path, data=data, headers=headers, params=params) - - def update(self, path, data=None, headers=None, params=None): - return self.send(method="UPDATE", path=path, data=data, headers=headers, params=params) - - 
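A minimal sketch of how parse_pagination_link() above is meant to be consumed; the header value here is invented for illustration::

    link_header = ('<https://api.scaleway.com/instance/v1/zones/fr-par-1/servers?page=2>; rel="next",'
                   '<https://api.scaleway.com/instance/v1/zones/fr-par-1/servers?page=5>; rel="last"')
    relations = parse_pagination_link(link_header)
    next_url = relations.get('next')  # None once no further page is advertised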
def warn(self, x): - self.module.warn(str(x)) - - -SCALEWAY_LOCATION = { - 'par1': { - 'name': 'Paris 1', - 'country': 'FR', - 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1' - }, - - 'EMEA-FR-PAR1': { - 'name': 'Paris 1', - 'country': 'FR', - 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1' - }, - - 'par2': { - 'name': 'Paris 2', - 'country': 'FR', - 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2' - }, - - 'EMEA-FR-PAR2': { - 'name': 'Paris 2', - 'country': 'FR', - 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2' - }, - - 'ams1': { - 'name': 'Amsterdam 1', - 'country': 'NL', - 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-10' - }, - - 'EMEA-NL-EVS': { - 'name': 'Amsterdam 1', - 'country': 'NL', - 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1' - }, - - 'waw1': { - 'name': 'Warsaw 1', - 'country': 'PL', - 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1' - }, - - 'EMEA-PL-WAW1': { - 'name': 'Warsaw 1', - 'country': 'PL', - 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1' - }, -} - -SCALEWAY_ENDPOINT = "https://api.scaleway.com" - -SCALEWAY_REGIONS = [ - "fr-par", - "nl-ams", - "pl-waw", -] - -SCALEWAY_ZONES = [ - "fr-par-1", - "fr-par-2", - "nl-ams-1", - "pl-waw-1", -] diff --git a/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py b/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py deleted file mode 100644 index 1d584391..00000000 --- a/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- - -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json - -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.basic import env_fallback -from ansible.module_utils.urls import fetch_url, basic_auth_header - - -class BitbucketHelper: - BITBUCKET_API_URL = 'https://api.bitbucket.org' - - def __init__(self, module): - self.module = module - self.access_token = None - - @staticmethod - def bitbucket_argument_spec(): - return dict( - client_id=dict(type='str', fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])), - client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])), - # TODO: - # - Rename user to username once current usage of username is removed - # - Alias user to username and deprecate it - user=dict(type='str', fallback=(env_fallback, ['BITBUCKET_USERNAME'])), - password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])), - ) - - @staticmethod - def bitbucket_required_one_of(): - return [['client_id', 'client_secret', 'user', 'password']] - - 
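For context, a hedged sketch of how these spec helpers (together with bitbucket_required_together() just below) are typically wired into AnsibleModule; the extra 'repository' option is invented::

    from ansible.module_utils.basic import AnsibleModule

    argument_spec = BitbucketHelper.bitbucket_argument_spec()
    argument_spec.update(repository=dict(type='str', required=True))  # hypothetical option

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=BitbucketHelper.bitbucket_required_one_of(),
        required_together=BitbucketHelper.bitbucket_required_together(),
    )
    helper = BitbucketHelper(module)
    helper.fetch_access_token()  # only fetches a token when client_id/client_secret are set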
@staticmethod - def bitbucket_required_together(): - return [['client_id', 'client_secret'], ['user', 'password']] - - def fetch_access_token(self): - if self.module.params['client_id'] and self.module.params['client_secret']: - headers = { - 'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret']), - } - - info, content = self.request( - api_url='https://bitbucket.org/site/oauth2/access_token', - method='POST', - data='grant_type=client_credentials', - headers=headers, - ) - - if info['status'] == 200: - self.access_token = content['access_token'] - else: - self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info)) - - def request(self, api_url, method, data=None, headers=None): - headers = headers or {} - - if self.access_token: - headers.update({ - 'Authorization': 'Bearer {0}'.format(self.access_token), - }) - elif self.module.params['user'] and self.module.params['password']: - headers.update({ - 'Authorization': basic_auth_header(self.module.params['user'], self.module.params['password']), - }) - - if isinstance(data, dict): - data = self.module.jsonify(data) - headers.update({ - 'Content-type': 'application/json', - }) - - response, info = fetch_url( - module=self.module, - url=api_url, - method=method, - headers=headers, - data=data, - force=True, - ) - - content = {} - - if response is not None: - body = to_text(response.read()) - if body: - content = json.loads(body) - - return info, content diff --git a/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py b/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py deleted file mode 100644 index 59225126..00000000 --- a/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py +++ /dev/null @@ -1,21 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# (c) 2018 Luca 'remix_tj' Lorenzetto -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -emc_vnx_argument_spec = { - 'sp_address': dict(type='str', required=True), - 'sp_user': dict(type='str', required=False, default='sysadmin'), - 'sp_password': dict(type='str', required=False, default='sysadmin', - no_log=True), -} diff --git a/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py b/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py deleted file mode 100644 index b7734444..00000000 --- a/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -from ansible.module_utils import basic - - -def convert_to_binary_multiple(size_with_unit): - if size_with_unit is None: - return -1 - valid_units = ['MiB', 'GiB', 'TiB'] - valid_unit = False - for unit in valid_units: - if size_with_unit.strip().endswith(unit): - valid_unit = True - size = size_with_unit.split(unit)[0] - if float(size) < 0: - return -1 - if not valid_unit: - raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units)) - - size = size_with_unit.replace(" ", "").split('iB')[0] - size_kib = basic.human_to_bytes(size) - return int(size_kib / (1024 * 1024)) - - -storage_system_spec = { - "storage_system_ip": { - "required": True, - "type": "str" - }, - "storage_system_username": { - "required": True, - "type": "str", - "no_log": True - }, - "storage_system_password": { - "required": True, - "type": "str", - "no_log": True - }, - "secure": { - "type": "bool", - "default": False - } -} - - -def cpg_argument_spec(): - spec = { - "state": { - "required": True, - "choices": ['present', 'absent'], - "type": 'str' - }, - "cpg_name": { - "required": True, - "type": "str" - }, - "domain": { - "type": "str" - }, - "growth_increment": { - "type": "str", - }, - "growth_limit": { - "type": "str", - }, - "growth_warning": { - "type": "str", - }, - "raid_type": { - "required": False, - "type": "str", - "choices": ['R0', 'R1', 'R5', 'R6'] - }, - "set_size": { - "required": False, - "type": "int" - }, - "high_availability": { - "type": "str", - "choices": ['PORT', 'CAGE', 'MAG'] - }, - "disk_type": { - "type": "str", - "choices": ['FC', 'NL', 'SSD'] - } - } - spec.update(storage_system_spec) - return spec diff --git a/ansible_collections/community/general/plugins/module_utils/version.py b/ansible_collections/community/general/plugins/module_utils/version.py deleted file mode 100644 index a236a34d..00000000 --- a/ansible_collections/community/general/plugins/module_utils/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -"""Provide version object to compare version numbers.""" - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can -# remove the 
_version.py file, and replace the following import by -# -# from ansible.module_utils.compat.version import LooseVersion - -from ._version import LooseVersion diff --git a/ansible_collections/community/general/plugins/module_utils/vexata.py b/ansible_collections/community/general/plugins/module_utils/vexata.py deleted file mode 100644 index 3d6fb7aa..00000000 --- a/ansible_collections/community/general/plugins/module_utils/vexata.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2019, Sandeep Kasargod -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -HAS_VEXATAPI = True -try: - from vexatapi.vexata_api_proxy import VexataAPIProxy -except ImportError: - HAS_VEXATAPI = False - -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.basic import env_fallback - -VXOS_VERSION = None - - -def get_version(iocs_json): - if not iocs_json: - raise Exception('Invalid IOC json') - active = filter(lambda x: x['mgmtRole'], iocs_json) - if not active: - raise Exception('Unable to detect active IOC') - active = active[0] - ver = active['swVersion'] - if ver[0] != 'v': - raise Exception('Illegal version string') - ver = ver[1:ver.find('-')] - ver = map(int, ver.split('.')) - return tuple(ver) - - -def get_array(module): - """Return storage array object or fail""" - global VXOS_VERSION - array = module.params['array'] - user = module.params.get('user', None) - password = module.params.get('password', None) - validate = module.params.get('validate_certs') - - if not HAS_VEXATAPI: - module.fail_json(msg='vexatapi library is required for this module. 
' - 'To install, use `pip install vexatapi`') - - if user and password: - system = VexataAPIProxy(array, user, password, verify_cert=validate) - else: - module.fail_json(msg='The user/password are required to be passed in to ' - 'the module as arguments or by setting the ' - 'VEXATA_USER and VEXATA_PASSWORD environment variables.') - try: - if system.test_connection(): - VXOS_VERSION = get_version(system.iocs()) - return system - else: - module.fail_json(msg='Test connection to array failed.') - except Exception as e: - module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e))) - - -def argument_spec(): - """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" - return dict( - array=dict(type='str', - required=True), - user=dict(type='str', - fallback=(env_fallback, ['VEXATA_USER'])), - password=dict(type='str', - no_log=True, - fallback=(env_fallback, ['VEXATA_PASSWORD'])), - validate_certs=dict(type='bool', - required=False, - default=False), - ) - - -def required_together(): - """Return the default list used for the required_together argument to AnsibleModule""" - return [['user', 'password']] - - -def size_to_MiB(size): - """Convert a '[MGT]' string to MiB, return -1 on error.""" - quant = size[:-1] - exponent = size[-1] - if not quant.isdigit() or exponent not in 'MGT': - return -1 - quant = int(quant) - if exponent == 'G': - quant <<= 10 - elif exponent == 'T': - quant <<= 20 - return quant diff --git a/ansible_collections/community/general/plugins/module_utils/xenserver.py b/ansible_collections/community/general/plugins/module_utils/xenserver.py deleted file mode 100644 index 015b1021..00000000 --- a/ansible_collections/community/general/plugins/module_utils/xenserver.py +++ /dev/null @@ -1,861 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import atexit -import time -import re -import traceback - -XENAPI_IMP_ERR = None -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - HAS_XENAPI = False - XENAPI_IMP_ERR = traceback.format_exc() - -from ansible.module_utils.basic import env_fallback, missing_required_lib -from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION - - -def xenserver_common_argument_spec(): - return dict( - hostname=dict(type='str', - aliases=['host', 'pool'], - required=False, - default='localhost', - fallback=(env_fallback, ['XENSERVER_HOST']), - ), - username=dict(type='str', - aliases=['user', 'admin'], - required=False, - default='root', - fallback=(env_fallback, ['XENSERVER_USER'])), - password=dict(type='str', - aliases=['pass', 'pwd'], - required=False, - no_log=True, - fallback=(env_fallback, ['XENSERVER_PASSWORD'])), - validate_certs=dict(type='bool', - required=False, - default=True, - fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])), - ) - - -def xapi_to_module_vm_power_state(power_state): - """Maps XAPI VM power states to module VM power states.""" - module_power_state_map = { - "running": "poweredon", - "halted": "poweredoff", - "suspended": "suspended", - "paused": "paused" - } - - return module_power_state_map.get(power_state) - - -def module_to_xapi_vm_power_state(power_state): - """Maps module VM power states to XAPI VM power states.""" - vm_power_state_map = { - "poweredon": "running", - "poweredoff": "halted", - "restarted": "running", - 
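# Transient requests such as 'restarted' (and 'rebootguest' below) deliberately map to the steady state XAPI reports once the operation finishes, i.e. 'running'.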
"suspended": "suspended", - "shutdownguest": "halted", - "rebootguest": "running", - } - - return vm_power_state_map.get(power_state) - - -def is_valid_ip_addr(ip_addr): - """Validates given string as IPv4 address for given string. - - Args: - ip_addr (str): string to validate as IPv4 address. - - Returns: - bool: True if string is valid IPv4 address, else False. - """ - ip_addr_split = ip_addr.split('.') - - if len(ip_addr_split) != 4: - return False - - for ip_addr_octet in ip_addr_split: - if not ip_addr_octet.isdigit(): - return False - - ip_addr_octet_int = int(ip_addr_octet) - - if ip_addr_octet_int < 0 or ip_addr_octet_int > 255: - return False - - return True - - -def is_valid_ip_netmask(ip_netmask): - """Validates given string as IPv4 netmask. - - Args: - ip_netmask (str): string to validate as IPv4 netmask. - - Returns: - bool: True if string is valid IPv4 netmask, else False. - """ - ip_netmask_split = ip_netmask.split('.') - - if len(ip_netmask_split) != 4: - return False - - valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255'] - - for ip_netmask_octet in ip_netmask_split: - if ip_netmask_octet not in valid_octet_values: - return False - - if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'): - return False - elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'): - return False - elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0': - return False - - return True - - -def is_valid_ip_prefix(ip_prefix): - """Validates given string as IPv4 prefix. - - Args: - ip_prefix (str): string to validate as IPv4 prefix. - - Returns: - bool: True if string is valid IPv4 prefix, else False. - """ - if not ip_prefix.isdigit(): - return False - - ip_prefix_int = int(ip_prefix) - - if ip_prefix_int < 0 or ip_prefix_int > 32: - return False - - return True - - -def ip_prefix_to_netmask(ip_prefix, skip_check=False): - """Converts IPv4 prefix to netmask. - - Args: - ip_prefix (str): IPv4 prefix to convert. - skip_check (bool): Skip validation of IPv4 prefix - (default: False). Use if you are sure IPv4 prefix is valid. - - Returns: - str: IPv4 netmask equivalent to given IPv4 prefix if - IPv4 prefix is valid, else an empty string. - """ - if skip_check: - ip_prefix_valid = True - else: - ip_prefix_valid = is_valid_ip_prefix(ip_prefix) - - if ip_prefix_valid: - return '.'.join([str((0xffffffff << (32 - int(ip_prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]]) - else: - return "" - - -def ip_netmask_to_prefix(ip_netmask, skip_check=False): - """Converts IPv4 netmask to prefix. - - Args: - ip_netmask (str): IPv4 netmask to convert. - skip_check (bool): Skip validation of IPv4 netmask - (default: False). Use if you are sure IPv4 netmask is valid. - - Returns: - str: IPv4 prefix equivalent to given IPv4 netmask if - IPv4 netmask is valid, else an empty string. - """ - if skip_check: - ip_netmask_valid = True - else: - ip_netmask_valid = is_valid_ip_netmask(ip_netmask) - - if ip_netmask_valid: - return str(sum([bin(int(i)).count("1") for i in ip_netmask.split(".")])) - else: - return "" - - -def is_valid_ip6_addr(ip6_addr): - """Validates given string as IPv6 address. - - Args: - ip6_addr (str): string to validate as IPv6 address. - - Returns: - bool: True if string is valid IPv6 address, else False. 
- """ - ip6_addr = ip6_addr.lower() - ip6_addr_split = ip6_addr.split(':') - - if ip6_addr_split[0] == "": - ip6_addr_split.pop(0) - - if ip6_addr_split[-1] == "": - ip6_addr_split.pop(-1) - - if len(ip6_addr_split) > 8: - return False - - if ip6_addr_split.count("") > 1: - return False - elif ip6_addr_split.count("") == 1: - ip6_addr_split.remove("") - else: - if len(ip6_addr_split) != 8: - return False - - ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$') - - for ip6_addr_hextet in ip6_addr_split: - if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)): - return False - - return True - - -def is_valid_ip6_prefix(ip6_prefix): - """Validates given string as IPv6 prefix. - - Args: - ip6_prefix (str): string to validate as IPv6 prefix. - - Returns: - bool: True if string is valid IPv6 prefix, else False. - """ - if not ip6_prefix.isdigit(): - return False - - ip6_prefix_int = int(ip6_prefix) - - if ip6_prefix_int < 0 or ip6_prefix_int > 128: - return False - - return True - - -def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""): - """Finds and returns a reference to arbitrary XAPI object. - - An object is searched by using either name (name_label) or UUID - with UUID taken precedence over name. - - Args: - module: Reference to Ansible module object. - name (str): Name (name_label) of an object to search for. - uuid (str): UUID of an object to search for. - obj_type (str): Any valid XAPI object type. See XAPI docs. - fail (bool): Should function fail with error message if object - is not found or exit silently (default: True). The function - always fails if multiple objects with same name are found. - msg_prefix (str): A string error messages should be prefixed - with (default: ""). - - Returns: - XAPI reference to found object or None if object is not found - and fail=False. - """ - xapi_session = XAPI.connect(module) - - if obj_type in ["template", "snapshot"]: - real_obj_type = "VM" - elif obj_type == "home server": - real_obj_type = "host" - elif obj_type == "ISO image": - real_obj_type = "VDI" - else: - real_obj_type = obj_type - - obj_ref = None - - # UUID has precedence over name. - if uuid: - try: - # Find object by UUID. If no object is found using given UUID, - # an exception will be generated. - obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,)) - except XenAPI.Failure as f: - if fail: - module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid)) - elif name: - try: - # Find object by name (name_label). - obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,)) - except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) - - # If obj_ref_list is empty. - if not obj_ref_list: - if fail: - module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name)) - # If obj_ref_list contains multiple object references. - elif len(obj_ref_list) > 1: - module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name)) - # The obj_ref_list contains only one object reference. - else: - obj_ref = obj_ref_list[0] - else: - module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type)) - - return obj_ref - - -def gather_vm_params(module, vm_ref): - """Gathers all VM parameters available in XAPI database. - - Args: - module: Reference to Ansible module object. - vm_ref (str): XAPI reference to VM. - - Returns: - dict: VM parameters. 
- """ - # We silently return empty vm_params if bad vm_ref was supplied. - if not vm_ref or vm_ref == "OpaqueRef:NULL": - return {} - - xapi_session = XAPI.connect(module) - - try: - vm_params = xapi_session.xenapi.VM.get_record(vm_ref) - - # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced. - - # Affinity. - if vm_params['affinity'] != "OpaqueRef:NULL": - vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity']) - vm_params['affinity'] = vm_affinity - else: - vm_params['affinity'] = {} - - # VBDs. - vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']] - - # List of VBDs is usually sorted by userdevice but we sort just - # in case. We need this list sorted by userdevice so that we can - # make positional pairing with module.params['disks']. - vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice'])) - vm_params['VBDs'] = vm_vbd_params_list - - # VDIs. - for vm_vbd_params in vm_params['VBDs']: - if vm_vbd_params['VDI'] != "OpaqueRef:NULL": - vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI']) - else: - vm_vdi_params = {} - - vm_vbd_params['VDI'] = vm_vdi_params - - # VIFs. - vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']] - - # List of VIFs is usually sorted by device but we sort just - # in case. We need this list sorted by device so that we can - # make positional pairing with module.params['networks']. - vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device'])) - vm_params['VIFs'] = vm_vif_params_list - - # Networks. - for vm_vif_params in vm_params['VIFs']: - if vm_vif_params['network'] != "OpaqueRef:NULL": - vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network']) - else: - vm_network_params = {} - - vm_vif_params['network'] = vm_network_params - - # Guest metrics. - if vm_params['guest_metrics'] != "OpaqueRef:NULL": - vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics']) - vm_params['guest_metrics'] = vm_guest_metrics - else: - vm_params['guest_metrics'] = {} - - # Detect customization agent. - xenserver_version = get_xenserver_version(module) - - if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and - "feature-static-ip-setting" in vm_params['guest_metrics']['other']): - vm_params['customization_agent'] = "native" - else: - vm_params['customization_agent'] = "custom" - - except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) - - return vm_params - - -def gather_vm_facts(module, vm_params): - """Gathers VM facts. - - Args: - module: Reference to Ansible module object. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - - Returns: - dict: VM facts. - """ - # We silently return empty vm_facts if no vm_params are available. - if not vm_params: - return {} - - xapi_session = XAPI.connect(module) - - # Gather facts. 
- vm_facts = { - "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()), - "name": vm_params['name_label'], - "name_desc": vm_params['name_description'], - "uuid": vm_params['uuid'], - "is_template": vm_params['is_a_template'], - "folder": vm_params['other_config'].get('folder', ''), - "hardware": { - "num_cpus": int(vm_params['VCPUs_max']), - "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')), - "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576), - }, - "disks": [], - "cdrom": {}, - "networks": [], - "home_server": vm_params['affinity'].get('name_label', ''), - "domid": vm_params['domid'], - "platform": vm_params['platform'], - "other_config": vm_params['other_config'], - "xenstore_data": vm_params['xenstore_data'], - "customization_agent": vm_params['customization_agent'], - } - - for vm_vbd_params in vm_params['VBDs']: - if vm_vbd_params['type'] == "Disk": - vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR']) - - vm_disk_params = { - "size": int(vm_vbd_params['VDI']['virtual_size']), - "name": vm_vbd_params['VDI']['name_label'], - "name_desc": vm_vbd_params['VDI']['name_description'], - "sr": vm_disk_sr_params['name_label'], - "sr_uuid": vm_disk_sr_params['uuid'], - "os_device": vm_vbd_params['device'], - "vbd_userdevice": vm_vbd_params['userdevice'], - } - - vm_facts['disks'].append(vm_disk_params) - elif vm_vbd_params['type'] == "CD": - if vm_vbd_params['empty']: - vm_facts['cdrom'].update(type="none") - else: - vm_facts['cdrom'].update(type="iso") - vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label']) - - for vm_vif_params in vm_params['VIFs']: - vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {}) - - vm_network_params = { - "name": vm_vif_params['network']['name_label'], - "mac": vm_vif_params['MAC'], - "vif_device": vm_vif_params['device'], - "mtu": vm_vif_params['MTU'], - "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''), - "prefix": "", - "netmask": "", - "gateway": "", - "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" % - vm_vif_params['device'])], - "prefix6": "", - "gateway6": "", - } - - if vm_params['customization_agent'] == "native": - if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]: - vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1] - vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix']) - - vm_network_params['gateway'] = vm_vif_params['ipv4_gateway'] - - if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]: - vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1] - - vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway'] - - elif vm_params['customization_agent'] == "custom": - vm_xenstore_data = vm_params['xenstore_data'] - - for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']: - vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "") - - vm_facts['networks'].append(vm_network_params) - - return vm_facts - - -def set_vm_power_state(module, vm_ref, power_state, timeout=300): - """Controls VM power state. - - Args: - module: Reference to Ansible module object. - vm_ref (str): XAPI reference to VM. - power_state (str): Power state to put VM into. 
Accepted values: - - - poweredon - - poweredoff - - restarted - - suspended - - shutdownguest - - rebootguest - - timeout (int): timeout in seconds (default: 300). - - Returns: - tuple (bool, str): Bool element is True if VM power state has - changed by calling this function, else False. Str element carries - a value of resulting power state as defined by XAPI - 'running', - 'halted' or 'suspended'. - """ - # Fail if we don't have a valid VM reference. - if not vm_ref or vm_ref == "OpaqueRef:NULL": - module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!") - - xapi_session = XAPI.connect(module) - - power_state = power_state.replace('_', '').replace('-', '').lower() - vm_power_state_resulting = module_to_xapi_vm_power_state(power_state) - - state_changed = False - - try: - # Get current state of the VM. - vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower()) - - if vm_power_state_current != power_state: - if power_state == "poweredon": - if not module.check_mode: - # VM can be in either halted, suspended, paused or running state. - # For VM to be in running state, start has to be called on halted, - # resume on suspended and unpause on paused VM. - if vm_power_state_current == "poweredoff": - xapi_session.xenapi.VM.start(vm_ref, False, False) - elif vm_power_state_current == "suspended": - xapi_session.xenapi.VM.resume(vm_ref, False, False) - elif vm_power_state_current == "paused": - xapi_session.xenapi.VM.unpause(vm_ref) - elif power_state == "poweredoff": - if not module.check_mode: - # hard_shutdown will halt VM regardless of current state. - xapi_session.xenapi.VM.hard_shutdown(vm_ref) - elif power_state == "restarted": - # hard_reboot will restart VM only if VM is in paused or running state. - if vm_power_state_current in ["paused", "poweredon"]: - if not module.check_mode: - xapi_session.xenapi.VM.hard_reboot(vm_ref) - else: - module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current) - elif power_state == "suspended": - # running state is required for suspend. - if vm_power_state_current == "poweredon": - if not module.check_mode: - xapi_session.xenapi.VM.suspend(vm_ref) - else: - module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current) - elif power_state == "shutdownguest": - # running state is required for guest shutdown. - if vm_power_state_current == "poweredon": - if not module.check_mode: - if timeout == 0: - xapi_session.xenapi.VM.clean_shutdown(vm_ref) - else: - task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref) - task_result = wait_for_task(module, task_ref, timeout) - - if task_result: - module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result) - else: - module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current) - elif power_state == "rebootguest": - # running state is required for guest reboot. - if vm_power_state_current == "poweredon": - if not module.check_mode: - if timeout == 0: - xapi_session.xenapi.VM.clean_reboot(vm_ref) - else: - task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref) - task_result = wait_for_task(module, task_ref, timeout) - - if task_result: - module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result) - else: - module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current) - else: - module.fail_json(msg="Requested VM power state '%s' is unsupported!" 
% power_state) - - state_changed = True - except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) - - return (state_changed, vm_power_state_resulting) - - -def wait_for_task(module, task_ref, timeout=300): - """Waits for async XAPI task to finish. - - Args: - module: Reference to Ansible module object. - task_ref (str): XAPI reference to task. - timeout (int): timeout in seconds (default: 300). - - Returns: - str: failure message on failure, else an empty string. - """ - # Fail if we don't have a valid task reference. - if not task_ref or task_ref == "OpaqueRef:NULL": - module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!") - - xapi_session = XAPI.connect(module) - - interval = 2 - - result = "" - - # If we have to wait indefinitely, make time_left larger than 0 so we can - # enter while loop. - if timeout == 0: - time_left = 1 - else: - time_left = timeout - - try: - while time_left > 0: - task_status = xapi_session.xenapi.task.get_status(task_ref).lower() - - if task_status == "pending": - # Task is still running. - time.sleep(interval) - - # We decrease time_left only if we don't wait indefinitely. - if timeout != 0: - time_left -= interval - - continue - elif task_status == "success": - # Task is done. - break - else: - # Task failed. - result = task_status - break - else: - # We timed out. - result = "timeout" - - xapi_session.xenapi.task.destroy(task_ref) - except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) - - return result - - -def wait_for_vm_ip_address(module, vm_ref, timeout=300): - """Waits for VM to acquire an IP address. - - Args: - module: Reference to Ansible module object. - vm_ref (str): XAPI reference to VM. - timeout (int): timeout in seconds (default: 300). - - Returns: - dict: VM guest metrics as retrieved by - VM_guest_metrics.get_record() XAPI method with info - on IP address acquired. - """ - # Fail if we don't have a valid VM reference. - if not vm_ref or vm_ref == "OpaqueRef:NULL": - module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!") - - xapi_session = XAPI.connect(module) - - vm_guest_metrics = {} - - try: - # We translate VM power state string so that error message can be - # consistent with module VM power states. - vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower()) - - if vm_power_state != 'poweredon': - module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state) - - interval = 2 - - # If we have to wait indefinitely, make time_left larger than 0 so we can - # enter while loop. - if timeout == 0: - time_left = 1 - else: - time_left = timeout - - while time_left > 0: - vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref) - - if vm_guest_metrics_ref != "OpaqueRef:NULL": - vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref) - vm_ips = vm_guest_metrics['networks'] - - if "0/ip" in vm_ips: - break - - time.sleep(interval) - - # We decrease time_left only if we don't wait indefinitely. - if timeout != 0: - time_left -= interval - else: - # We timed out. - module.fail_json(msg="Timed out waiting for VM IP address!") - - except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) - - return vm_guest_metrics - - -def get_xenserver_version(module): - """Returns XenServer version. - - Args: - module: Reference to Ansible module object. - - Returns: - list: Element [0] is major version. 
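wait_for_task() and wait_for_vm_ip_address() above share one polling idiom: seed time_left with 1 when timeout == 0 so the loop runs forever, otherwise count the timeout down in interval steps. Distilled into a standalone sketch (naming is mine, not the module's):

    import time

    def poll(check, timeout=300, interval=2):
        # Return True once check() succeeds, False when `timeout` seconds elapse.
        # timeout == 0 polls indefinitely, mirroring the helpers above.
        time_left = 1 if timeout == 0 else timeout
        while time_left > 0:
            if check():
                return True
            time.sleep(interval)
            if timeout != 0:
                time_left -= interval
        return False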
Element [1] is minor version. - Element [2] is update number. - """ - xapi_session = XAPI.connect(module) - - host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session) - - try: - xenserver_version = [int(version_number) for version_number in xapi_session.xenapi.host.get_software_version(host_ref)['product_version'].split('.')] - except ValueError: - xenserver_version = [0, 0, 0] - - return xenserver_version - - -class XAPI(object): - """Class for XAPI session management.""" - _xapi_session = None - - @classmethod - def connect(cls, module, disconnect_atexit=True): - """Establishes XAPI connection and returns session reference. - - If no existing session is available, establishes a new one - and returns it, else returns existing one. - - Args: - module: Reference to Ansible module object. - disconnect_atexit (bool): Controls if method should - register atexit handler to disconnect from XenServer - on module exit (default: True). - - Returns: - XAPI session reference. - """ - if cls._xapi_session is not None: - return cls._xapi_session - - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - ignore_ssl = not module.params['validate_certs'] - - if hostname == 'localhost': - cls._xapi_session = XenAPI.xapi_local() - username = '' - password = '' - else: - # If scheme is not specified we default to http:// because https:// - # is problematic in most setups. - if not hostname.startswith("http://") and not hostname.startswith("https://"): - hostname = "http://%s" % hostname - - try: - # ignore_ssl is supported in XenAPI library from XenServer 7.2 - # SDK onward but there is no way to tell which version we - # are using. TypeError will be raised if ignore_ssl is not - # supported. Additionally, ignore_ssl requires Python 2.7.9 - # or newer. - cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl) - except TypeError: - # Try without ignore_ssl. - cls._xapi_session = XenAPI.Session(hostname) - - if not password: - password = '' - - try: - cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible') - except XenAPI.Failure as f: - module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details)) - - # Disabling atexit should be used in special cases only. - if disconnect_atexit: - atexit.register(cls._xapi_session.logout) - - return cls._xapi_session - - -class XenServerObject(object): - """Base class for all XenServer objects. - - This class contains active XAPI session reference and common - attributes with useful info about XenServer host/pool. - - Attributes: - module: Reference to Ansible module object. - xapi_session: Reference to XAPI session. - pool_ref (str): XAPI reference to a pool currently connected to. - default_sr_ref (str): XAPI reference to a pool default - Storage Repository. - host_ref (str): XAPI reference to a host currently connected to. - xenserver_version (list of int): Contains XenServer major, - minor and update version numbers. - """ - - def __init__(self, module): - """Inits XenServerObject using common module parameters. - - Args: - module: Reference to Ansible module object.
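XAPI.connect() caches its session in a class attribute, so every helper and every XenServerObject within one module run shares a single login. Reduced to its core, the pattern looks like this (a sketch with my own naming, not the module's API):

    class CachedSession(object):
        _session = None  # class-level cache shared by all callers

        @classmethod
        def connect(cls, factory):
            # First caller pays the login cost; later callers reuse the session.
            if cls._session is None:
                cls._session = factory()
            return cls._session

    assert CachedSession.connect(object) is CachedSession.connect(object)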
- """ - if not HAS_XENAPI: - module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR) - - self.module = module - self.xapi_session = XAPI.connect(module) - - try: - self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0] - self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref) - self.xenserver_version = get_xenserver_version(module) - except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) diff --git a/ansible_collections/community/general/plugins/modules/aerospike_migrations.py b/ansible_collections/community/general/plugins/modules/aerospike_migrations.py deleted file mode 120000 index 4d351842..00000000 --- a/ansible_collections/community/general/plugins/modules/aerospike_migrations.py +++ /dev/null @@ -1 +0,0 @@ -database/aerospike/aerospike_migrations.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/airbrake_deployment.py b/ansible_collections/community/general/plugins/modules/airbrake_deployment.py deleted file mode 120000 index 1d2a42b7..00000000 --- a/ansible_collections/community/general/plugins/modules/airbrake_deployment.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/airbrake_deployment.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/aix_devices.py b/ansible_collections/community/general/plugins/modules/aix_devices.py deleted file mode 120000 index 091e7d39..00000000 --- a/ansible_collections/community/general/plugins/modules/aix_devices.py +++ /dev/null @@ -1 +0,0 @@ -system/aix_devices.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/aix_filesystem.py b/ansible_collections/community/general/plugins/modules/aix_filesystem.py deleted file mode 120000 index 4e4076c8..00000000 --- a/ansible_collections/community/general/plugins/modules/aix_filesystem.py +++ /dev/null @@ -1 +0,0 @@ -system/aix_filesystem.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/aix_inittab.py b/ansible_collections/community/general/plugins/modules/aix_inittab.py deleted file mode 120000 index 9ce4fa68..00000000 --- a/ansible_collections/community/general/plugins/modules/aix_inittab.py +++ /dev/null @@ -1 +0,0 @@ -system/aix_inittab.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/aix_lvg.py b/ansible_collections/community/general/plugins/modules/aix_lvg.py deleted file mode 120000 index 92d71021..00000000 --- a/ansible_collections/community/general/plugins/modules/aix_lvg.py +++ /dev/null @@ -1 +0,0 @@ -system/aix_lvg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/aix_lvol.py b/ansible_collections/community/general/plugins/modules/aix_lvol.py deleted file mode 120000 index 5ebb4034..00000000 --- a/ansible_collections/community/general/plugins/modules/aix_lvol.py +++ /dev/null @@ -1 +0,0 @@ -system/aix_lvol.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ali_instance.py b/ansible_collections/community/general/plugins/modules/ali_instance.py deleted file mode 120000 index 829f0d03..00000000 --- a/ansible_collections/community/general/plugins/modules/ali_instance.py +++ /dev/null @@ -1 +0,0 @@ -cloud/alicloud/ali_instance.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ali_instance_info.py 
b/ansible_collections/community/general/plugins/modules/ali_instance_info.py deleted file mode 120000 index c0e57afd..00000000 --- a/ansible_collections/community/general/plugins/modules/ali_instance_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/alicloud/ali_instance_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/alternatives.py b/ansible_collections/community/general/plugins/modules/alternatives.py deleted file mode 120000 index fdfc887b..00000000 --- a/ansible_collections/community/general/plugins/modules/alternatives.py +++ /dev/null @@ -1 +0,0 @@ -system/alternatives.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py deleted file mode 120000 index 369d39db..00000000 --- a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/ansible_galaxy_install.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py b/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py deleted file mode 120000 index 212a1197..00000000 --- a/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/apache2_mod_proxy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/apache2_module.py b/ansible_collections/community/general/plugins/modules/apache2_module.py deleted file mode 120000 index a4d07a8c..00000000 --- a/ansible_collections/community/general/plugins/modules/apache2_module.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/apache2_module.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/apk.py b/ansible_collections/community/general/plugins/modules/apk.py deleted file mode 120000 index 3496ad6f..00000000 --- a/ansible_collections/community/general/plugins/modules/apk.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/apk.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/apt_repo.py b/ansible_collections/community/general/plugins/modules/apt_repo.py deleted file mode 120000 index df3dbae2..00000000 --- a/ansible_collections/community/general/plugins/modules/apt_repo.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/apt_repo.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/apt_rpm.py b/ansible_collections/community/general/plugins/modules/apt_rpm.py deleted file mode 120000 index c8b0a2e6..00000000 --- a/ansible_collections/community/general/plugins/modules/apt_rpm.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/apt_rpm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/archive.py b/ansible_collections/community/general/plugins/modules/archive.py deleted file mode 120000 index 8e133de5..00000000 --- a/ansible_collections/community/general/plugins/modules/archive.py +++ /dev/null @@ -1 +0,0 @@ -files/archive.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/atomic_container.py b/ansible_collections/community/general/plugins/modules/atomic_container.py deleted file mode 120000 index d6afefb3..00000000 --- a/ansible_collections/community/general/plugins/modules/atomic_container.py +++ /dev/null @@ -1 +0,0 @@ 
-cloud/atomic/atomic_container.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/atomic_host.py b/ansible_collections/community/general/plugins/modules/atomic_host.py deleted file mode 120000 index 407f9b9a..00000000 --- a/ansible_collections/community/general/plugins/modules/atomic_host.py +++ /dev/null @@ -1 +0,0 @@ -cloud/atomic/atomic_host.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/atomic_image.py b/ansible_collections/community/general/plugins/modules/atomic_image.py deleted file mode 120000 index ca8f119e..00000000 --- a/ansible_collections/community/general/plugins/modules/atomic_image.py +++ /dev/null @@ -1 +0,0 @@ -cloud/atomic/atomic_image.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/awall.py b/ansible_collections/community/general/plugins/modules/awall.py deleted file mode 120000 index ca397959..00000000 --- a/ansible_collections/community/general/plugins/modules/awall.py +++ /dev/null @@ -1 +0,0 @@ -system/awall.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/beadm.py b/ansible_collections/community/general/plugins/modules/beadm.py deleted file mode 120000 index 48aae8df..00000000 --- a/ansible_collections/community/general/plugins/modules/beadm.py +++ /dev/null @@ -1 +0,0 @@ -system/beadm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/bearychat.py b/ansible_collections/community/general/plugins/modules/bearychat.py deleted file mode 120000 index 66a23399..00000000 --- a/ansible_collections/community/general/plugins/modules/bearychat.py +++ /dev/null @@ -1 +0,0 @@ -notification/bearychat.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/bigpanda.py b/ansible_collections/community/general/plugins/modules/bigpanda.py deleted file mode 120000 index e3adb6c5..00000000 --- a/ansible_collections/community/general/plugins/modules/bigpanda.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/bigpanda.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py b/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py deleted file mode 120000 index 6719686a..00000000 --- a/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py +++ /dev/null @@ -1 +0,0 @@ -source_control/bitbucket/bitbucket_access_key.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py deleted file mode 120000 index ab706ead..00000000 --- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py +++ /dev/null @@ -1 +0,0 @@ -source_control/bitbucket/bitbucket_pipeline_key_pair.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py deleted file mode 120000 index 0e2ff5e3..00000000 --- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py +++ /dev/null @@ -1 +0,0 @@ -source_control/bitbucket/bitbucket_pipeline_known_host.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py deleted file mode 120000 index ab03fed9..00000000 --- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py +++ /dev/null @@ -1 +0,0 @@ -source_control/bitbucket/bitbucket_pipeline_variable.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/bower.py b/ansible_collections/community/general/plugins/modules/bower.py deleted file mode 120000 index e30c1646..00000000 --- a/ansible_collections/community/general/plugins/modules/bower.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/bower.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/bundler.py b/ansible_collections/community/general/plugins/modules/bundler.py deleted file mode 120000 index 106df0c4..00000000 --- a/ansible_collections/community/general/plugins/modules/bundler.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/bundler.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/bzr.py b/ansible_collections/community/general/plugins/modules/bzr.py deleted file mode 120000 index d04b1124..00000000 --- a/ansible_collections/community/general/plugins/modules/bzr.py +++ /dev/null @@ -1 +0,0 @@ -source_control/bzr.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/campfire.py b/ansible_collections/community/general/plugins/modules/campfire.py deleted file mode 120000 index 5a29d232..00000000 --- a/ansible_collections/community/general/plugins/modules/campfire.py +++ /dev/null @@ -1 +0,0 @@ -notification/campfire.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/capabilities.py b/ansible_collections/community/general/plugins/modules/capabilities.py deleted file mode 120000 index a4fdcb9c..00000000 --- a/ansible_collections/community/general/plugins/modules/capabilities.py +++ /dev/null @@ -1 +0,0 @@ -system/capabilities.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/cargo.py b/ansible_collections/community/general/plugins/modules/cargo.py deleted file mode 120000 index 4cfbb506..00000000 --- a/ansible_collections/community/general/plugins/modules/cargo.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/cargo.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/catapult.py b/ansible_collections/community/general/plugins/modules/catapult.py deleted file mode 120000 index 2ac0c142..00000000 --- a/ansible_collections/community/general/plugins/modules/catapult.py +++ /dev/null @@ -1 +0,0 @@ -notification/catapult.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/circonus_annotation.py b/ansible_collections/community/general/plugins/modules/circonus_annotation.py deleted file mode 120000 index e2278645..00000000 --- a/ansible_collections/community/general/plugins/modules/circonus_annotation.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/circonus_annotation.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/cisco_spark.py b/ansible_collections/community/general/plugins/modules/cisco_spark.py deleted file mode 120000 index af172516..00000000 --- a/ansible_collections/community/general/plugins/modules/cisco_spark.py +++ /dev/null @@ 
-1 +0,0 @@ -notification/cisco_webex.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/cisco_webex.py b/ansible_collections/community/general/plugins/modules/cisco_webex.py deleted file mode 120000 index af172516..00000000 --- a/ansible_collections/community/general/plugins/modules/cisco_webex.py +++ /dev/null @@ -1 +0,0 @@ -notification/cisco_webex.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_aa_policy.py b/ansible_collections/community/general/plugins/modules/clc_aa_policy.py deleted file mode 120000 index c9c633f3..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_aa_policy.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_aa_policy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_alert_policy.py b/ansible_collections/community/general/plugins/modules/clc_alert_policy.py deleted file mode 120000 index 50ef7db6..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_alert_policy.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_alert_policy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py b/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py deleted file mode 120000 index 3982cea6..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_blueprint_package.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py b/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py deleted file mode 120000 index 0b05ba17..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_firewall_policy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_group.py b/ansible_collections/community/general/plugins/modules/clc_group.py deleted file mode 120000 index 5a1f6954..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_group.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_group.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py b/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py deleted file mode 120000 index e50d52f6..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_loadbalancer.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_modify_server.py b/ansible_collections/community/general/plugins/modules/clc_modify_server.py deleted file mode 120000 index 306530d0..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_modify_server.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_modify_server.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_publicip.py b/ansible_collections/community/general/plugins/modules/clc_publicip.py deleted file mode 120000 index 682925e5..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_publicip.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_publicip.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/clc_server.py b/ansible_collections/community/general/plugins/modules/clc_server.py deleted file mode 120000 index d59e068a..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_server.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_server.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py b/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py deleted file mode 120000 index a411552e..00000000 --- a/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py +++ /dev/null @@ -1 +0,0 @@ -cloud/centurylink/clc_server_snapshot.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py b/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py deleted file mode 100644 index 09754ccd..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py +++ /dev/null @@ -1,1013 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see http://www.gnu.org/licenses/. - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ali_instance -short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group. -description: - - Create, start, stop, restart, modify or terminate ecs instances. - - Add or remove ecs instances to/from security group. -options: - state: - description: - - The state of the instance after operating. - default: 'present' - choices: ['present', 'running', 'stopped', 'restarted', 'absent'] - type: str - availability_zone: - description: - - Aliyun availability zone ID in which to launch the instance. - If it is not specified, it will be allocated by system automatically. - aliases: ['alicloud_zone', 'zone_id'] - type: str - image_id: - description: - - Image ID used to launch instances. Required when C(state=present) and creating new ECS instances. - aliases: ['image'] - type: str - instance_type: - description: - - Instance type used to launch instances. Required when C(state=present) and creating new ECS instances. - aliases: ['type'] - type: str - security_groups: - description: - - A list of security group IDs. - aliases: ['group_ids'] - type: list - elements: str - vswitch_id: - description: - - The subnet ID in which to launch the instances (VPC). - aliases: ['subnet_id'] - type: str - instance_name: - description: - - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. 
It must begin with an - uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-". - It cannot begin with http:// or https://. - aliases: ['name'] - type: str - description: - description: - - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://. - type: str - internet_charge_type: - description: - - Internet charge type of ECS instance. - default: 'PayByBandwidth' - choices: ['PayByBandwidth', 'PayByTraffic'] - type: str - max_bandwidth_in: - description: - - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second). - default: 200 - type: int - max_bandwidth_out: - description: - - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). - Required when C(allocate_public_ip=True). Ignored when C(allocate_public_ip=False). - default: 0 - type: int - host_name: - description: - - Instance host name. Ordered hostname is not supported. - type: str - unique_suffix: - description: - - Specifies whether to add sequential suffixes to the host_name. - The sequential suffix ranges from 001 to 999. - default: False - type: bool - version_added: '0.2.0' - password: - description: - - The password to login instance. After rebooting instances, modified password will take effect. - type: str - system_disk_category: - description: - - Category of the system disk. - default: 'cloud_efficiency' - choices: ['cloud_efficiency', 'cloud_ssd'] - type: str - system_disk_size: - description: - - Size of the system disk, in GB. The valid values are 40~500. - default: 40 - type: int - system_disk_name: - description: - - Name of the system disk. - type: str - system_disk_description: - description: - - Description of the system disk. - type: str - count: - description: - - The number of the new instance. An integer value which indicates how many instances that match I(count_tag) - should be running. Instances are either created or terminated based on this value. - default: 1 - type: int - count_tag: - description: - - I(count) determines how many instances based on a specific tag criteria should be present. - This can be expressed in multiple ways and is shown in the EXAMPLES section. - The specified count_tag must already exist or be passed in as the I(tags) option. - If it is not specified, it will be replaced by I(instance_name). - type: str - allocate_public_ip: - description: - - Whether allocate a public ip for the new instance. - default: False - aliases: [ 'assign_public_ip' ] - type: bool - instance_charge_type: - description: - - The charge type of the instance. - choices: ['PrePaid', 'PostPaid'] - default: 'PostPaid' - type: str - period: - description: - - The charge duration of the instance, in month. Required when C(instance_charge_type=PrePaid). - - The valid value are [1-9, 12, 24, 36]. - default: 1 - type: int - auto_renew: - description: - - Whether automate renew the charge of the instance. - type: bool - default: False - auto_renew_period: - description: - - The duration of the automatic renew the charge of the instance. Required when C(auto_renew=True). - choices: [1, 2, 3, 6, 12] - type: int - instance_ids: - description: - - A list of instance ids. It is required when need to operate existing instances. - If it is specified, I(count) will lose efficacy. - type: list - elements: str - force: - description: - - Whether the current operation needs to be execute forcibly. 
- default: False - type: bool - tags: - description: - - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. C({"key":"value"}) - aliases: ["instance_tags"] - type: dict - version_added: '0.2.0' - purge_tags: - description: - - Delete any tags not specified in the task that are on the instance. - If True, it means you have to specify all the desired tags on each task affecting an instance. - default: False - type: bool - version_added: '0.2.0' - key_name: - description: - - The name of key pair which is used to access ECS instance in SSH. - required: false - type: str - aliases: ['keypair'] - user_data: - description: - - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. - It only will take effect when launching the new ECS instances. - required: false - type: str - ram_role_name: - description: - - The name of the instance RAM role. - type: str - version_added: '0.2.0' - spot_price_limit: - description: - - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal - places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit. - type: float - version_added: '0.2.0' - spot_strategy: - description: - - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid. - choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo'] - default: 'NoSpot' - type: str - version_added: '0.2.0' - period_unit: - description: - - The duration unit that you will buy the resource. It is valid when C(instance_charge_type=PrePaid) - choices: ['Month', 'Week'] - default: 'Month' - type: str - version_added: '0.2.0' - dry_run: - description: - - Specifies whether to send a dry-run request. - - If I(dry_run=True), Only a dry-run request is sent and no instance is created. The system checks whether the - required parameters are set, and validates the request format, service permissions, and available ECS instances. - If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned. - - If I(dry_run=False), A request is sent. If the validation succeeds, the instance is created. - default: False - type: bool - version_added: '0.2.0' - include_data_disks: - description: - - Whether to change instance disks charge type when changing instance charge type. 
- default: True - type: bool - version_added: '0.2.0' -author: - - "He Guimin (@xiaozhu36)" -requirements: - - "python >= 3.6" - - "footmark >= 1.19.0" -extends_documentation_fragment: - - community.general.alicloud -''' - -EXAMPLES = ''' -# basic provisioning example vpc network -- name: Basic provisioning example - hosts: localhost - vars: - alicloud_access_key: - alicloud_secret_key: - alicloud_region: cn-beijing - image: ubuntu1404_64_40G_cloudinit_20160727.raw - instance_type: ecs.n4.small - vswitch_id: vsw-abcd1234 - assign_public_ip: True - max_bandwidth_out: 10 - host_name: myhost - password: mypassword - system_disk_category: cloud_efficiency - system_disk_size: 100 - internet_charge_type: PayByBandwidth - security_groups: ["sg-f2rwnfh23r"] - - instance_ids: ["i-abcd12346", "i-abcd12345"] - force: True - - tasks: - - name: Launch ECS instance in VPC network - community.general.ali_instance: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - image: '{{ image }}' - system_disk_category: '{{ system_disk_category }}' - system_disk_size: '{{ system_disk_size }}' - instance_type: '{{ instance_type }}' - vswitch_id: '{{ vswitch_id }}' - assign_public_ip: '{{ assign_public_ip }}' - internet_charge_type: '{{ internet_charge_type }}' - max_bandwidth_out: '{{ max_bandwidth_out }}' - tags: - Name: created_one - host_name: '{{ host_name }}' - password: '{{ password }}' - - - name: With count and count_tag to create a number of instances - community.general.ali_instance: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - image: '{{ image }}' - system_disk_category: '{{ system_disk_category }}' - system_disk_size: '{{ system_disk_size }}' - instance_type: '{{ instance_type }}' - assign_public_ip: '{{ assign_public_ip }}' - security_groups: '{{ security_groups }}' - internet_charge_type: '{{ internet_charge_type }}' - max_bandwidth_out: '{{ max_bandwidth_out }}' - tags: - Name: created_one - Version: 0.1 - count: 2 - count_tag: - Name: created_one - host_name: '{{ host_name }}' - password: '{{ password }}' - - - name: Start instance - community.general.ali_instance: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - instance_ids: '{{ instance_ids }}' - state: 'running' - - - name: Reboot instance forcibly - ecs: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - instance_ids: '{{ instance_ids }}' - state: 'restarted' - force: '{{ force }}' - - - name: Add instances to an security group - ecs: - alicloud_access_key: '{{ alicloud_access_key }}' - alicloud_secret_key: '{{ alicloud_secret_key }}' - alicloud_region: '{{ alicloud_region }}' - instance_ids: '{{ instance_ids }}' - security_groups: '{{ security_groups }}' -''' - -RETURN = ''' -instances: - description: List of ECS instances - returned: always - type: complex - contains: - availability_zone: - description: The availability zone of the instance is in. - returned: always - type: str - sample: cn-beijing-a - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/xvda). 
- returned: always - type: str - sample: /dev/xvda - attach_time: - description: The time stamp when the attachment was initiated. - returned: always - type: str - sample: "2018-06-25T04:08:26Z" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: in_use - volume_id: - description: The ID of the cloud disk. - returned: always - type: str - sample: d-2zei53pjsi117y6gf9t6 - cpu: - description: The CPU core count of the instance. - returned: always - type: int - sample: 4 - creation_time: - description: The time the instance was created. - returned: always - type: str - sample: "2018-06-25T04:08Z" - description: - description: The instance description. - returned: always - type: str - sample: "my ansible instance" - eip: - description: The attributes of the EIP associated with the instance. - returned: always - type: complex - contains: - allocation_id: - description: The ID of the EIP. - returned: always - type: str - sample: eip-12345 - internet_charge_type: - description: The internet charge type of the EIP. - returned: always - type: str - sample: "paybybandwidth" - ip_address: - description: EIP address. - returned: always - type: str - sample: 42.10.2.2 - expired_time: - description: The time the instance will expire. - returned: always - type: str - sample: "2099-12-31T15:59Z" - gpu: - description: The attributes of the instance GPU. - returned: always - type: complex - contains: - amount: - description: The count of the GPU. - returned: always - type: int - sample: 0 - spec: - description: The specification of the GPU. - returned: always - type: str - sample: "" - host_name: - description: The host name of the instance. - returned: always - type: str - sample: iZ2zewaoZ - id: - description: Alias of instance_id. - returned: always - type: str - sample: i-abc12345 - instance_id: - description: ECS instance resource ID. - returned: always - type: str - sample: i-abc12345 - image_id: - description: The ID of the image used to launch the instance. - returned: always - type: str - sample: m-0011223344 - inner_ip_address: - description: The inner IPv4 address of the classic instance. - returned: always - type: str - sample: 10.0.0.2 - instance_charge_type: - description: The instance charge type. - returned: always - type: str - sample: PostPaid - instance_name: - description: The name of the instance. - returned: always - type: str - sample: my-ecs - instance_type: - description: The instance type of the running instance. - returned: always - type: str - sample: ecs.sn1ne.xlarge - instance_type_family: - description: The instance type family that the instance belongs to. - returned: always - type: str - sample: ecs.sn1ne - internet_charge_type: - description: The billing method of the network bandwidth. - returned: always - type: str - sample: PayByBandwidth - internet_max_bandwidth_in: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 200 - internet_max_bandwidth_out: - description: Maximum outgoing bandwidth to the internet network. - returned: always - type: int - sample: 20 - io_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - memory: - description: Memory size of the instance.
- returned: always - type: int - sample: 8192 - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. - returned: always - type: str - sample: eni-01234567 - primary_ip_address: - description: The primary IPv4 address of the network interface within the vswitch. - returned: always - type: str - sample: 10.0.0.1 - osname: - description: The operation system name of the instance owned. - returned: always - type: str - sample: CentOS - ostype: - description: The operation system type of the instance owned. - returned: always - type: str - sample: linux - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - public_ip_address: - description: The public IPv4 address assigned to the instance or eip address - returned: always - type: str - sample: 43.0.0.1 - resource_group_id: - description: The id of the resource group to which the instance belongs. - returned: always - type: str - sample: my-ecs-group - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group - status: - description: The current status of the instance. - returned: always - type: str - sample: running - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - user_data: - description: User-defined data. - returned: always - type: dict - sample: - vswitch_id: - description: The ID of the vswitch in which the instance is running. - returned: always - type: str - sample: vsw-dew00abcdef - vpc_id: - description: The ID of the VPC the instance is in. - returned: always - type: str - sample: vpc-0011223344 - spot_price_limit: - description: - - The maximum hourly price for the preemptible instance. - returned: always - type: float - sample: 0.97 - spot_strategy: - description: - - The bidding mode of the pay-as-you-go instance. 
- returned: always - type: str - sample: NoSpot -ids: - description: List of ECS instance IDs - returned: always - type: list - sample: [i-12345er, i-3245fs] -''' - -import re -import time -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect - -HAS_FOOTMARK = False -FOOTMARK_IMP_ERR = None -try: - from footmark.exception import ECSResponseError - HAS_FOOTMARK = True -except ImportError: - FOOTMARK_IMP_ERR = traceback.format_exc() - HAS_FOOTMARK = False - - -def get_instances_info(connection, ids): - result = [] - instances = connection.describe_instances(instance_ids=ids) - if len(instances) > 0: - for inst in instances: - volumes = connection.describe_disks(instance_id=inst.id) - setattr(inst, 'block_device_mappings', volumes) - setattr(inst, 'user_data', inst.describe_user_data()) - result.append(inst.read()) - return result - - -def run_instance(module, ecs, exact_count): - if exact_count <= 0: - return None - zone_id = module.params['availability_zone'] - image_id = module.params['image_id'] - instance_type = module.params['instance_type'] - security_groups = module.params['security_groups'] - vswitch_id = module.params['vswitch_id'] - instance_name = module.params['instance_name'] - description = module.params['description'] - internet_charge_type = module.params['internet_charge_type'] - max_bandwidth_out = module.params['max_bandwidth_out'] - max_bandwidth_in = module.params['max_bandwidth_in'] - host_name = module.params['host_name'] - password = module.params['password'] - system_disk_category = module.params['system_disk_category'] - system_disk_size = module.params['system_disk_size'] - system_disk_name = module.params['system_disk_name'] - system_disk_description = module.params['system_disk_description'] - allocate_public_ip = module.params['allocate_public_ip'] - period = module.params['period'] - auto_renew = module.params['auto_renew'] - instance_charge_type = module.params['instance_charge_type'] - auto_renew_period = module.params['auto_renew_period'] - user_data = module.params['user_data'] - key_name = module.params['key_name'] - ram_role_name = module.params['ram_role_name'] - spot_price_limit = module.params['spot_price_limit'] - spot_strategy = module.params['spot_strategy'] - unique_suffix = module.params['unique_suffix'] - # check whether the required parameter passed or not - if not image_id: - module.fail_json(msg='image_id is required for new instance') - if not instance_type: - module.fail_json(msg='instance_type is required for new instance') - if not isinstance(security_groups, list): - module.fail_json(msg='The parameter security_groups should be a list, aborting') - if len(security_groups) <= 0: - module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting') - - client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time())) - - try: - # call to create_instance method from footmark - instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0], - zone_id=zone_id, instance_name=instance_name, description=description, - internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out, - internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password, - io_optimized='optimized', system_disk_category=system_disk_category, - 
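One thing worth noting about run_instance() above: the client_token it builds is Alicloud's idempotency handle, so a retried request with the same token cannot create duplicate instances. Because the token embeds time.time() (and Python's per-process string hashing), two separate module runs always produce distinct tokens; the deduplication effectively protects retries within a single call. Shape of the value, with hypothetical stand-in parameters:

    import time
    params = {'image_id': 'm-0011223344', 'instance_type': 'ecs.n4.small'}  # stand-in for module.params
    client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(params)), str(time.time()))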
system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name, - system_disk_description=system_disk_description, vswitch_id=vswitch_id, - amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month", - auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name, - user_data=user_data, client_token=client_token, ram_role_name=ram_role_name, - spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix) - - except Exception as e: - module.fail_json(msg='Unable to create instance, error: {0}'.format(e)) - - return instances - - -def modify_instance(module, instance): - # Modify some of the instance's attributes according to the requested state - state = module.params["state"] - name = module.params['instance_name'] - unique_suffix = module.params['unique_suffix'] - if not name: - name = instance.name - - description = module.params['description'] - if not description: - description = instance.description - - host_name = module.params['host_name'] - if unique_suffix and host_name: - suffix = instance.host_name[-3:] - host_name = host_name + suffix - - if not host_name: - host_name = instance.host_name - - # the password can be modified only when restarting the instance - password = "" - if state == "restarted": - password = module.params['password'] - - # user data can be modified only when the instance is stopped - setattr(instance, "user_data", instance.describe_user_data()) - user_data = instance.user_data - if state == "stopped" and module.params['user_data'] is not None: - user_data = module.params['user_data'].encode() - - try: - return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data) - except Exception as e: - module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e)) - - -def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300): - """ - Wait until the charge type of every instance matches the expected value after modifying it - """ - try: - while True: - instances = ecs.describe_instances(instance_ids=instance_ids) - flag = True - for inst in instances: - if inst and inst.instance_charge_type != charge_type: - flag = False - if flag: - return - timeout -= delay - time.sleep(delay) - if timeout <= 0: - raise Exception("Timeout Error: Waiting for instance charge type to become {0}. 
".format(charge_type)) - except Exception as e: - raise e - - -def main(): - argument_spec = ecs_argument_spec() - argument_spec.update(dict( - security_groups=dict(type='list', elements='str', aliases=['group_ids']), - availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']), - instance_type=dict(type='str', aliases=['type']), - image_id=dict(type='str', aliases=['image']), - count=dict(type='int', default=1), - count_tag=dict(type='str'), - vswitch_id=dict(type='str', aliases=['subnet_id']), - instance_name=dict(type='str', aliases=['name']), - host_name=dict(type='str'), - password=dict(type='str', no_log=True), - internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']), - max_bandwidth_in=dict(type='int', default=200), - max_bandwidth_out=dict(type='int', default=0), - system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']), - system_disk_size=dict(type='int', default=40), - system_disk_name=dict(type='str'), - system_disk_description=dict(type='str'), - force=dict(type='bool', default=False), - tags=dict(type='dict', aliases=['instance_tags']), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']), - description=dict(type='str'), - allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False), - instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']), - period=dict(type='int', default=1), - auto_renew=dict(type='bool', default=False), - instance_ids=dict(type='list', elements='str'), - auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]), - key_name=dict(type='str', aliases=['keypair']), - user_data=dict(type='str'), - ram_role_name=dict(type='str'), - spot_price_limit=dict(type='float'), - spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']), - unique_suffix=dict(type='bool', default=False), - period_unit=dict(type='str', default='Month', choices=['Month', 'Week']), - dry_run=dict(type='bool', default=False), - include_data_disks=dict(type='bool', default=True) - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - if HAS_FOOTMARK is False: - module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) - - ecs = ecs_connect(module) - host_name = module.params['host_name'] - state = module.params['state'] - instance_ids = module.params['instance_ids'] - count_tag = module.params['count_tag'] - count = module.params['count'] - instance_name = module.params['instance_name'] - force = module.params['force'] - zone_id = module.params['availability_zone'] - key_name = module.params['key_name'] - tags = module.params['tags'] - max_bandwidth_out = module.params['max_bandwidth_out'] - instance_charge_type = module.params['instance_charge_type'] - if instance_charge_type == "PrePaid": - module.params['spot_strategy'] = '' - changed = False - - instances = [] - if instance_ids: - if not isinstance(instance_ids, list): - module.fail_json(msg='The parameter instance_ids should be a list, aborting') - instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids) - if not instances: - module.fail_json(msg="There are no instances in our record based on instance_ids {0}. 
" - "Please check it and try again.".format(instance_ids)) - elif count_tag: - instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag)) - elif instance_name: - instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name) - - ids = [] - if state == 'absent': - if len(instances) < 1: - module.fail_json(msg='Please specify ECS instances that you want to operate by using ' - 'parameters instance_ids, tags or instance_name, aborting') - try: - targets = [] - for inst in instances: - if inst.status != 'stopped' and not force: - module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.") - targets.append(inst.id) - if ecs.delete_instances(instance_ids=targets, force=force): - changed = True - ids.extend(targets) - - module.exit_json(changed=changed, ids=ids, instances=[]) - except Exception as e: - module.fail_json(msg='Delete instance got an error: {0}'.format(e)) - - if module.params['allocate_public_ip'] and max_bandwidth_out < 0: - module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.") - if not module.params['allocate_public_ip']: - module.params['max_bandwidth_out'] = 0 - - if state == 'present': - if not instance_ids: - if len(instances) > count: - for i in range(0, len(instances) - count): - inst = instances[len(instances) - 1] - if inst.status != 'stopped' and not force: - module.fail_json(msg="That to delete instance {0} is failed results from it is running, " - "and please stop it or set 'force' as True.".format(inst.id)) - try: - if inst.terminate(force=force): - changed = True - except Exception as e: - module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e)) - instances.pop(len(instances) - 1) - else: - try: - if re.search(r"-\[\d+,\d+\]-", host_name): - module.fail_json(msg='Ordered hostname is not supported, If you want to add an ordered ' - 'suffix to the hostname, you can set unique_suffix to True') - new_instances = run_instance(module, ecs, count - len(instances)) - if new_instances: - changed = True - instances.extend(new_instances) - except Exception as e: - module.fail_json(msg="Create new instances got an error: {0}".format(e)) - - # Security Group join/leave begin - security_groups = module.params['security_groups'] - if security_groups: - if not isinstance(security_groups, list): - module.fail_json(msg='The parameter security_groups should be a list, aborting') - for inst in instances: - existing = inst.security_group_ids['security_group_id'] - remove = list(set(existing).difference(set(security_groups))) - add = list(set(security_groups).difference(set(existing))) - for sg in remove: - if inst.leave_security_group(sg): - changed = True - for sg in add: - if inst.join_security_group(sg): - changed = True - # Security Group join/leave ends here - - # Attach/Detach key pair - inst_ids = [] - for inst in instances: - if key_name is not None and key_name != inst.key_name: - if key_name == "": - if inst.detach_key_pair(): - changed = True - else: - inst_ids.append(inst.id) - if inst_ids: - changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name) - - # Modify instance attribute - for inst in instances: - if modify_instance(module, inst): - changed = True - if inst.id not in ids: - ids.append(inst.id) - - # Modify instance charge type - ids = [] - for inst in instances: - if inst.instance_charge_type != instance_charge_type: - ids.append(inst.id) - if ids: - params = {"instance_ids": ids, "instance_charge_type": 
instance_charge_type, - "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'], - "auto_pay": True} - if instance_charge_type == 'PrePaid': - params['period'] = module.params['period'] - params['period_unit'] = module.params['period_unit'] - - if ecs.modify_instance_charge_type(**params): - changed = True - wait_for_instance_modify_charge(ecs, ids, instance_charge_type) - - else: - if len(instances) < 1: - module.fail_json(msg='Please specify ECS instances that you want to operate by using ' - 'parameters instance_ids, tags or instance_name, aborting') - if state == 'running': - try: - targets = [] - for inst in instances: - if modify_instance(module, inst): - changed = True - if inst.status != "running": - targets.append(inst.id) - ids.append(inst.id) - if targets and ecs.start_instances(instance_ids=targets): - changed = True - ids.extend(targets) - except Exception as e: - module.fail_json(msg='Start instances got an error: {0}'.format(e)) - elif state == 'stopped': - try: - targets = [] - for inst in instances: - if inst.status != "stopped": - targets.append(inst.id) - if targets and ecs.stop_instances(instance_ids=targets, force_stop=force): - changed = True - ids.extend(targets) - for inst in instances: - if modify_instance(module, inst): - changed = True - except Exception as e: - module.fail_json(msg='Stop instances got an error: {0}'.format(e)) - elif state == 'restarted': - try: - targets = [] - for inst in instances: - if modify_instance(module, inst): - changed = True - targets.append(inst.id) - if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']): - changed = True - ids.extend(targets) - except Exception as e: - module.fail_json(msg='Reboot instances got an error: {0}'.format(e)) - - tags = module.params['tags'] - if module.params['purge_tags']: - for inst in instances: - if not tags: - tags = inst.tags - try: - if inst.remove_tags(tags): - changed = True - except Exception as e: - module.fail_json(msg="{0}".format(e)) - module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) - - if tags: - for inst in instances: - try: - if inst.add_tags(tags): - changed = True - except Exception as e: - module.fail_json(msg="{0}".format(e)) - module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py b/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py deleted file mode 100644 index 06df6cb4..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py +++ /dev/null @@ -1,444 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see http://www.gnu.org/licenses/. - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ali_instance_info -short_description: Gather information on instances of Alibaba Cloud ECS. -description: - - This module fetches data from the Open API in Alicloud. - The module must be called from within the ECS instance itself. - - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change. - -options: - availability_zone: - description: - - Aliyun availability zone ID in which to launch the instance. - - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(zone_id) instead. - aliases: ['alicloud_zone'] - type: str - instance_names: - description: - - A list of ECS instance names. - - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(instance_name) instead. - aliases: ["names"] - type: list - elements: str - instance_ids: - description: - - A list of ECS instance IDs. - aliases: ["ids"] - type: list - elements: str - name_prefix: - description: - - Use an instance name prefix to filter ECS instances. - type: str - version_added: '0.2.0' - tags: - description: - - A hash/dictionaries of instance tags. C({"key":"value"}) - aliases: ["instance_tags"] - type: dict - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be - all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. - Filter keys can be the same as the request parameter name or be lower case and use underscore ("_") or dash ("-") to - connect different words in one parameter. 'InstanceIds' should be a list and it will be appended to - I(instance_ids) automatically. 'Tag.n.Key' and 'Tag.n.Value' should be a dict; use I(tags) instead. - type: dict - version_added: '0.2.0' -author: - - "He Guimin (@xiaozhu36)" -requirements: - - "python >= 3.6" - - "footmark >= 1.13.0" -extends_documentation_fragment: - - community.general.alicloud -''' - -EXAMPLES = ''' -# Fetch instance details by applying different filters - -- name: Find all instances in the specified region - community.general.ali_instance_info: - register: all_instances - -- name: Find all instances based on the specified ids - community.general.ali_instance_info: - instance_ids: - - "i-35b333d9" - - "i-ddav43kd" - register: instances_by_ids - -- name: Find all instances based on the specified name_prefix - community.general.ali_instance_info: - name_prefix: "ecs_instance_" - register: instances_by_name_prefix - -- name: Find instances based on tags - community.general.ali_instance_info: - tags: - Test: "add" -''' - -RETURN = ''' -instances: - description: List of ECS instances - returned: always - type: complex - contains: - availability_zone: - description: The availability zone the instance is in. - returned: always - type: str - sample: cn-beijing-a - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/xvda). - returned: always - type: str - sample: /dev/xvda - attach_time: - description: The time stamp when the attachment was initiated. 
- returned: always - type: str - sample: "2018-06-25T04:08:26Z" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: in_use - volume_id: - description: The ID of the cloud disk. - returned: always - type: str - sample: d-2zei53pjsi117y6gf9t6 - cpu: - description: The CPU core count of the instance. - returned: always - type: int - sample: 4 - creation_time: - description: The time the instance was created. - returned: always - type: str - sample: "2018-06-25T04:08Z" - description: - description: The instance description. - returned: always - type: str - sample: "my ansible instance" - eip: - description: The attribution of EIP associated with the instance. - returned: always - type: complex - contains: - allocation_id: - description: The ID of the EIP. - returned: always - type: str - sample: eip-12345 - internet_charge_type: - description: The internet charge type of the EIP. - returned: always - type: str - sample: "paybybandwidth" - ip_address: - description: EIP address. - returned: always - type: str - sample: 42.10.2.2 - expired_time: - description: The time the instance will expire. - returned: always - type: str - sample: "2099-12-31T15:59Z" - gpu: - description: The attribution of instance GPU. - returned: always - type: complex - contains: - amount: - description: The count of the GPU. - returned: always - type: int - sample: 0 - spec: - description: The specification of the GPU. - returned: always - type: str - sample: "" - host_name: - description: The host name of the instance. - returned: always - type: str - sample: iZ2zewaoZ - id: - description: Alias of instance_id. - returned: always - type: str - sample: i-abc12345 - instance_id: - description: ECS instance resource ID. - returned: always - type: str - sample: i-abc12345 - image_id: - description: The ID of the image used to launch the instance. - returned: always - type: str - sample: m-0011223344 - inner_ip_address: - description: The inner IPv4 address of the classic instance. - returned: always - type: str - sample: 10.0.0.2 - instance_charge_type: - description: The instance charge type. - returned: always - type: str - sample: PostPaid - instance_name: - description: The name of the instance. - returned: always - type: str - sample: my-ecs - instance_type_family: - description: The instance type family of the instance belongs. - returned: always - type: str - sample: ecs.sn1ne - instance_type: - description: The instance type of the running instance. - returned: always - type: str - sample: ecs.sn1ne.xlarge - internet_charge_type: - description: The billing method of the network bandwidth. - returned: always - type: str - sample: PayByBandwidth - internet_max_bandwidth_in: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 200 - internet_max_bandwidth_out: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 20 - io_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - memory: - description: Memory size of the instance. - returned: always - type: int - sample: 8192 - network_interfaces: - description: One or more network interfaces for the instance. 
- returned: always - type: complex - contains: - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. - returned: always - type: str - sample: eni-01234567 - primary_ip_address: - description: The primary IPv4 address of the network interface within the vswitch. - returned: always - type: str - sample: 10.0.0.1 - osname: - description: The operating system name of the instance. - returned: always - type: str - sample: CentOS - ostype: - description: The operating system type of the instance. - returned: always - type: str - sample: linux - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - public_ip_address: - description: The public IPv4 address assigned to the instance or EIP address. - returned: always - type: str - sample: 43.0.0.1 - resource_group_id: - description: The ID of the resource group to which the instance belongs. - returned: always - type: str - sample: my-ecs-group - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group - status: - description: The current status of the instance. - returned: always - type: str - sample: running - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - vswitch_id: - description: The ID of the vswitch in which the instance is running. - returned: always - type: str - sample: vsw-dew00abcdef - vpc_id: - description: The ID of the VPC the instance is in. 
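-# The fields documented above are what playbooks read from this module's
-# registered result. A minimal, illustrative sketch (the 'ecs_info' variable
-# name is hypothetical; the module name and returned fields are the ones
-# documented here):
-#
-# - name: Gather ECS instance information
-#   community.general.ali_instance_info:
-#     name_prefix: "ecs_instance_"
-#   register: ecs_info
-#
-# - name: Show the public IP of the first matching instance
-#   ansible.builtin.debug:
-#     msg: "{{ ecs_info.instances[0].public_ip_address }}"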
- returned: always - type: str - sample: vpc-0011223344 -ids: - description: List of ECS instance IDs - returned: always - type: list - sample: [i-12345er, i-3245fs] -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect - -HAS_FOOTMARK = False -FOOTMARK_IMP_ERR = None -try: - from footmark.exception import ECSResponseError - HAS_FOOTMARK = True -except ImportError: - FOOTMARK_IMP_ERR = traceback.format_exc() - HAS_FOOTMARK = False - - -def main(): - argument_spec = ecs_argument_spec() - argument_spec.update(dict( - availability_zone=dict(aliases=['alicloud_zone'], - removed_in_version="5.0.0", removed_from_collection="community.general"), - instance_ids=dict(type='list', elements='str', aliases=['ids'], - removed_in_version="5.0.0", removed_from_collection="community.general"), - instance_names=dict(type='list', elements='str', aliases=['names']), - name_prefix=dict(type='str'), - tags=dict(type='dict', aliases=['instance_tags']), - filters=dict(type='dict') - ) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - if HAS_FOOTMARK is False: - module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) - - ecs = ecs_connect(module) - - instances = [] - instance_ids = [] - ids = module.params['instance_ids'] - name_prefix = module.params['name_prefix'] - names = module.params['instance_names'] - zone_id = module.params['availability_zone'] - if ids and (not isinstance(ids, list) or len(ids) < 1): - module.fail_json(msg='instance_ids should be a non-empty list of instance IDs, aborting') - - if names and (not isinstance(names, list) or len(names) < 1): - module.fail_json(msg='instance_names should be a non-empty list of instance names, aborting') - - filters = module.params['filters'] - if not filters: - filters = {} - if not ids: - ids = [] - for key, value in list(filters.items()): - if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list): - for inst_id in value: - if inst_id not in ids: - ids.append(inst_id) - if ids: - filters['instance_ids'] = ids - if module.params['tags']: - filters['tags'] = module.params['tags'] - if zone_id: - filters['zone_id'] = zone_id - if names: - filters['instance_name'] = names[0] - - for inst in ecs.describe_instances(**filters): - if name_prefix: - if not str(inst.instance_name).startswith(name_prefix): - continue - volumes = ecs.describe_disks(instance_id=inst.id) - setattr(inst, 'block_device_mappings', volumes) - setattr(inst, 'user_data', inst.describe_user_data()) - instances.append(inst.read()) - instance_ids.append(inst.id) - - module.exit_json(changed=False, ids=instance_ids, instances=instances) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py b/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py deleted file mode 100644 index ca631256..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: atomic_container -short_description: Manage the 
containers on the atomic host platform -description: - - Manage the containers on the atomic host platform. - - Allows managing the lifecycle of a container on the atomic host platform. -author: "Giuseppe Scrivano (@giuseppe)" -notes: - - Host should support the C(atomic) command -requirements: - - atomic - - "python >= 2.6" -options: - backend: - description: - - Define the backend to use for the container. - required: True - choices: ["docker", "ostree"] - type: str - name: - description: - - Name of the container. - required: True - type: str - image: - description: - - The image to use to install the container. - required: True - type: str - rootfs: - description: - - Define the rootfs of the image. - type: str - state: - description: - - State of the container. - choices: ["absent", "latest", "present", "rollback"] - default: "latest" - type: str - mode: - description: - - Define if it is a user or a system container. - choices: ["user", "system"] - type: str - values: - description: - - Values for the installation of the container. - - This option is permitted only with mode 'user' or 'system'. - - The values specified here will be used at installation time as --set arguments for atomic install. - type: list - elements: str -''' - -EXAMPLES = r''' - -- name: Install the etcd system container - community.general.atomic_container: - name: etcd - image: rhel/etcd - backend: ostree - state: latest - mode: system - values: - - ETCD_NAME=etcd.server - -- name: Uninstall the etcd system container - community.general.atomic_container: - name: etcd - image: rhel/etcd - backend: ostree - state: absent - mode: system -''' - -RETURN = r''' -msg: - description: The command's standard output - returned: always - type: str - sample: [u'Using default tag: latest ...'] -''' - -# import module snippets -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def do_install(module, mode, rootfs, container, image, values_list, backend): - system_list = ["--system"] if mode == 'system' else [] - user_list = ["--user"] if mode == 'user' else [] - rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else [] - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=out, changed=changed) - - -def do_update(module, container, image, values_list): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=out, changed=changed) - - -def do_uninstall(module, name, backend): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - module.exit_json(msg=out, changed=True) - - -def do_rollback(module, name): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'containers', 'rollback', name] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - 
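- # A non-zero exit status from 'atomic containers rollback' fails the task with the CLI's stderr; otherwise 'changed' is inferred from the "Rolling back" marker in stdout.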
module.fail_json(rc=rc, msg=err) - else: - changed = "Rolling back" in out - module.exit_json(msg=out, changed=changed) - - -def core(module): - mode = module.params['mode'] - name = module.params['name'] - image = module.params['image'] - rootfs = module.params['rootfs'] - values = module.params['values'] - backend = module.params['backend'] - state = module.params['state'] - - atomic_bin = module.get_bin_path('atomic') - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - - values_list = ["--set=%s" % x for x in values] if values else [] - - args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - return - present = name in out - - if state == 'present' and present: - module.exit_json(msg=out, changed=False) - elif (state in ['latest', 'present']) and not present: - do_install(module, mode, rootfs, name, image, values_list, backend) - elif state == 'latest': - do_update(module, name, image, values_list) - elif state == 'absent': - if not present: - module.exit_json(msg="The container is not present", changed=False) - else: - do_uninstall(module, name, backend) - elif state == 'rollback': - do_rollback(module, name) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - mode=dict(default=None, choices=['user', 'system']), - name=dict(required=True), - image=dict(required=True), - rootfs=dict(default=None), - state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']), - backend=dict(required=True, choices=['docker', 'ostree']), - values=dict(type='list', default=[], elements='str'), - ), - ) - - if module.params['values'] and module.params['mode'] not in ('user', 'system'): - module.fail_json(msg="values is supported only with user or system mode") - - # Verify that the platform supports atomic command - dummy = module.get_bin_path('atomic', required=True) - - try: - core(module) - except Exception as e: - module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py b/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py deleted file mode 100644 index 85b00f91..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: atomic_host -short_description: Manage the atomic host platform -description: - - Manage the atomic host platform. - - Rebooting of Atomic host platform should be done outside this module. -author: -- Saravanan KR (@krsacme) -notes: - - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). -requirements: - - atomic - - python >= 2.6 -options: - revision: - description: - - The version number of the atomic host to be deployed. - - Providing C(latest) will upgrade to the latest available version. 
- default: 'latest' - aliases: [ version ] - type: str -''' - -EXAMPLES = r''' -- name: Upgrade the atomic host platform to the latest version (atomic host upgrade) - community.general.atomic_host: - revision: latest - -- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130) - community.general.atomic_host: - revision: 23.130 -''' - -RETURN = r''' -msg: - description: The command's standard output - returned: always - type: str - sample: 'Already on latest' -''' -import os -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def core(module): - revision = module.params['revision'] - atomic_bin = module.get_bin_path('atomic', required=True) - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - - if revision == 'latest': - args = [atomic_bin, 'host', 'upgrade'] - else: - args = [atomic_bin, 'host', 'deploy', revision] - - rc, out, err = module.run_command(args, check_rc=False) - - if rc == 77 and revision == 'latest': - module.exit_json(msg="Already on latest", changed=False) - elif rc != 0: - module.fail_json(rc=rc, msg=err) - else: - module.exit_json(msg=out, changed=True) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - revision=dict(type='str', default='latest', aliases=["version"]), - ), - ) - - # Verify that the platform is atomic host - if not os.path.exists("/run/ostree-booted"): - module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only") - - try: - core(module) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py b/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py deleted file mode 100644 index 350ad4c2..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: atomic_image -short_description: Manage the container images on the atomic host platform -description: - - Manage the container images on the atomic host platform. - - Allows executing the commands specified by the RUN label in the container image, when present. -author: -- Saravanan KR (@krsacme) -notes: - - Host should support C(atomic) command. -requirements: - - atomic - - python >= 2.6 -options: - backend: - description: - - Define the backend where the image is pulled. - choices: [ 'docker', 'ostree' ] - type: str - name: - description: - - Name of the container image. - required: True - type: str - state: - description: - - The state of the container image. - - The state C(latest) will ensure the container image is upgraded to the latest version and forcefully restart the container, if running. - choices: [ 'absent', 'latest', 'present' ] - default: 'latest' - type: str - started: - description: - - Start or stop the container. 
- type: bool - default: 'yes' -''' - -EXAMPLES = r''' -- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog) - community.general.atomic_image: - name: rhel7/rsyslog - state: latest - -- name: Pull busybox to the OSTree backend - community.general.atomic_image: - name: busybox - state: latest - backend: ostree -''' - -RETURN = r''' -msg: - description: The command's standard output - returned: always - type: str - sample: [u'Using default tag: latest ...'] -''' -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def do_upgrade(module, image): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'update', '--force', image] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: # something went wrong; emit the msg - module.fail_json(rc=rc, msg=err) - elif 'Image is up to date' in out: - return False - - return True - - -def core(module): - image = module.params['name'] - state = module.params['state'] - started = module.params['started'] - backend = module.params['backend'] - is_upgraded = False - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - atomic_bin = module.get_bin_path('atomic') - out = {} - err = {} - rc = 0 - - if backend: - if state == 'present' or state == 'latest': - args = [atomic_bin, 'pull', "--storage=%s" % backend, image] - rc, out, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - else: - out_run = "" - if started: - args = [atomic_bin, 'run', "--storage=%s" % backend, image] - rc, out_run, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=(out + out_run), changed=changed) - elif state == 'absent': - args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image] - rc, out, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Unable to find" not in out - module.exit_json(msg=out, changed=changed) - return - - if state == 'present' or state == 'latest': - if state == 'latest': - is_upgraded = do_upgrade(module, image) - - if started: - args = [atomic_bin, 'run', image] - else: - args = [atomic_bin, 'install', image] - elif state == 'absent': - args = [atomic_bin, 'uninstall', image] - - rc, out, err = module.run_command(args, check_rc=False) - - if rc < 0: - module.fail_json(rc=rc, msg=err) - elif rc == 1 and 'already present' in err: - module.exit_json(result=err, changed=is_upgraded) - elif started and 'Container is running' in out: - module.exit_json(result=out, changed=is_upgraded) - else: - module.exit_json(msg=out, changed=True) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - backend=dict(type='str', choices=['docker', 'ostree']), - name=dict(type='str', required=True), - state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']), - started=dict(type='bool', default=True), - ), - ) - - # Verify that the platform supports atomic command - dummy = module.get_bin_path('atomic', required=True) - - try: - core(module) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py 
b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py deleted file mode 100644 index 416a4a6c..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py +++ /dev/null @@ -1,345 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_aa_policy -short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud. -description: - - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud. -options: - name: - description: - - The name of the Anti Affinity Policy. - type: str - required: True - location: - description: - - Datacenter in which the policy lives/should live. - type: str - required: True - state: - description: - - Whether to create or delete the policy. - type: str - required: False - default: present - choices: ['present','absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
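-# The notes above rely on exporting credentials in the shell. As an
-# alternative sketch (illustrative variable values; 'environment' is the
-# standard Ansible play keyword and the variable names are those listed in
-# the notes), the credentials can be supplied per-play:
-#
-# - hosts: localhost
-#   connection: local
-#   gather_facts: false
-#   environment:
-#     CLC_V2_API_USERNAME: "{{ clc_username }}"
-#     CLC_V2_API_PASSWD: "{{ clc_password }}"
-#   tasks:
-#     - name: Create an Anti Affinity Policy
-#       community.general.clc_aa_policy:
-#         name: Hammer Time
-#         location: UK3
-#         state: present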
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create AA Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy - -- name: Delete AA Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy -''' - -RETURN = ''' -policy: - description: The anti affinity policy information - returned: success - type: dict - sample: - { - "id":"1a28dd0988984d87b9cd61fa8da15424", - "name":"test_aa_policy", - "location":"UC1", - "links":[ - { - "rel":"self", - "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424", - "verbs":[ - "GET", - "DELETE", - "PUT" - ] - }, - { - "rel":"location", - "href":"/v2/datacenters/wfad/UC1", - "id":"uc1", - "name":"UC1 - US West (Santa Clara)" - } - ] - } -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk: -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAntiAffinityPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), - exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), - exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - location=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_policies_for_datacenter(p) - - if p['state'] == "absent": - changed, policy = self._ensure_policy_is_absent(p) - else: - changed, policy = self._ensure_policy_is_present(p) - - if hasattr(policy, 'data'): - policy = policy.data - elif hasattr(policy, '__dict__'): - policy = policy.__dict__ - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC 
Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_policies_for_datacenter(self, p): - """ - Get the Policies for a datacenter by calling the CLC API. - :param p: datacenter to get policies from - :return: policies in the datacenter - """ - response = {} - - policies = self.clc.v2.AntiAffinity.GetAll(location=p['location']) - - for policy in policies: - response[policy.name] = policy - return response - - def _create_policy(self, p): - """ - Create an Anti Affinity Policy using the CLC API. - :param p: datacenter to create policy in - :return: response dictionary from the CLC API. - """ - try: - return self.clc.v2.AntiAffinity.Create( - name=p['name'], - location=p['location']) - except CLCException as ex: - self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _delete_policy(self, p): - """ - Delete an Anti Affinity Policy using the CLC API. - :param p: datacenter to delete a policy from - :return: none - """ - try: - policy = self.policy_dict[p['name']] - policy.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _policy_exists(self, policy_name): - """ - Check to see if an Anti Affinity Policy exists - :param policy_name: name of the policy - :return: boolean of if the policy exists - """ - if policy_name in self.policy_dict: - return self.policy_dict.get(policy_name) - - return False - - def _ensure_policy_is_absent(self, p): - """ - Makes sure that a policy is absent - :param p: dictionary of policy name - :return: tuple of if a deletion occurred and the name of the policy that was deleted - """ - changed = False - if self._policy_exists(policy_name=p['name']): - changed = True - if not self.module.check_mode: - self._delete_policy(p) - return changed, None - - def _ensure_policy_is_present(self, p): - """ - Ensures that a policy is present - :param p: dictionary of a policy name - :return: tuple of if an addition occurred and the name of the policy that was added - """ - changed = False - policy = self._policy_exists(policy_name=p['name']) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_policy(p) - return changed, policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - module = AnsibleModule( - argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(), - supports_check_mode=True) - clc_aa_policy = ClcAntiAffinityPolicy(module) - clc_aa_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py deleted file mode 100644 index b6ed6e96..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py +++ /dev/null @@ -1,529 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_alert_policy -short_description: Create or Delete Alert Policies at CenturyLink Cloud. -description: - - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud. -options: - alias: - description: - - The alias of your CLC Account - type: str - required: True - name: - description: - - The name of the alert policy. This is mutually exclusive with id - type: str - id: - description: - - The alert policy id. This is mutually exclusive with name - type: str - alert_recipients: - description: - - A list of recipient email ids to notify the alert. - This is required for state 'present' - type: list - elements: str - metric: - description: - - The metric on which to measure the condition that will trigger the alert. - This is required for state 'present' - type: str - choices: ['cpu','memory','disk'] - duration: - description: - - The length of time in minutes that the condition must exceed the threshold. - This is required for state 'present' - type: str - threshold: - description: - - The threshold that will trigger the alert when the metric equals or exceeds it. - This is required for state 'present' - This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0 - type: int - state: - description: - - Whether to create or delete the policy. - type: str - default: present - choices: ['present','absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create Alert Policy Example - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create an Alert Policy for disk above 80% for 5 minutes - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - alert_recipients: - - test1@centurylink.com - - test2@centurylink.com - metric: 'disk' - duration: '00:05:00' - threshold: 80 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: var=policy - -- name: Delete Alert Policy Example - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Alert Policy - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: var=policy -''' - -RETURN = ''' -policy: - description: The alert policy information - returned: success - type: dict - sample: - { - "actions": [ - { - "action": "email", - "settings": { - "recipients": [ - "user1@domain.com", - "user1@domain.com" - ] - } - } - ], - "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7", - "links": [ - { - "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7", - "rel": "self", - "verbs": [ - "GET", - "DELETE", - "PUT" - ] - } - ], - "name": "test_alert", - "triggers": [ - { - "duration": "00:05:00", - "metric": "disk", - "threshold": 80.0 - } - ] - } -''' - -__version__ = '${version}' - -import json -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAlertPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(), - id=dict(), - alias=dict(required=True), - alert_recipients=dict(type='list', elements='str'), - metric=dict( - choices=[ - 'cpu', - 'memory', - 'disk'], - default=None), - duration=dict(type='str'), - threshold=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - mutually_exclusive = [ - ['name', 'id'] - ] - return {'argument_spec': argument_spec, - 'mutually_exclusive': mutually_exclusive} - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_alert_policies(p['alias']) - - if p['state'] == 'present': - changed, policy = self._ensure_alert_policy_is_present() - else: - changed, policy = self._ensure_alert_policy_is_absent() - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_alert_policy_is_present(self): - """ - Ensures that the alert policy is present - :return: (changed, policy) - changed: A flag representing if anything is modified - policy: the created/updated alert policy - """ - changed = False - p = self.module.params - policy_name = p.get('name') - - if not policy_name: - self.module.fail_json(msg='Policy name is required') - policy = self._alert_policy_exists(policy_name) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_alert_policy() - else: - changed_u, policy = 
self._ensure_alert_policy_is_updated(policy) - if changed_u: - changed = True - return changed, policy - - def _ensure_alert_policy_is_absent(self): - """ - Ensures that the alert policy is absent - :return: (changed, None) - changed: A flag representing if anything is modified - """ - changed = False - p = self.module.params - alert_policy_id = p.get('id') - alert_policy_name = p.get('name') - alias = p.get('alias') - if not alert_policy_id and not alert_policy_name: - self.module.fail_json( - msg='Either alert policy id or policy name is required') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id( - self.module, - alert_policy_name) - if alert_policy_id and alert_policy_id in self.policy_dict: - changed = True - if not self.module.check_mode: - self._delete_alert_policy(alias, alert_policy_id) - return changed, None - - def _ensure_alert_policy_is_updated(self, alert_policy): - """ - Ensures the alert policy is updated if anything is changed in the alert policy configuration - :param alert_policy: the target alert policy - :return: (changed, policy) - changed: A flag representing if anything is modified - policy: the updated alert policy - """ - changed = False - p = self.module.params - alert_policy_id = alert_policy.get('id') - email_list = p.get('alert_recipients') - metric = p.get('metric') - duration = p.get('duration') - threshold = p.get('threshold') - policy = alert_policy - if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \ - (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \ - (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))): - changed = True - elif email_list: - t_email_list = list( - alert_policy.get('actions')[0].get('settings').get('recipients')) - if set(email_list) != set(t_email_list): - changed = True - if changed and not self.module.check_mode: - policy = self._update_alert_policy(alert_policy_id) - return changed, policy - - def _get_alert_policies(self, alias): - """ - Get the alert policies for account alias by calling the CLC API. - :param alias: the account alias - :return: the alert policies for the account alias - """ - response = {} - - policies = self.clc.v2.API.Call('GET', - '/v2/alertPolicies/%s' - % alias) - - for policy in policies.get('items'): - response[policy.get('id')] = policy - return response - - def _create_alert_policy(self): - """ - Create an alert Policy using the CLC API. - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'POST', - '/v2/alertPolicies/%s' % alias, - arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to create alert policy "{0}". {1}'.format( - policy_name, str(e.response_text))) - return result - - def _update_alert_policy(self, alert_policy_id): - """ - Update alert policy using the CLC API. - :param alert_policy_id: The clc alert policy id - :return: response dictionary from the CLC API. 
- """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'PUT', '/v2/alertPolicies/%s/%s' % - (alias, alert_policy_id), arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to update alert policy "{0}". {1}'.format( - policy_name, str(e.response_text))) - return result - - def _delete_alert_policy(self, alias, policy_id): - """ - Delete an alert policy using the CLC API. - :param alias : the account alias - :param policy_id: the alert policy id - :return: response dictionary from the CLC API. - """ - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/alertPolicies/%s/%s' % - (alias, policy_id), None) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to delete alert policy id "{0}". {1}'.format( - policy_id, str(e.response_text))) - return result - - def _alert_policy_exists(self, policy_name): - """ - Check to see if an alert policy exists - :param policy_name: name of the alert policy - :return: boolean of if the policy exists - """ - result = False - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == policy_name: - result = self.policy_dict.get(policy_id) - return result - - def _get_alert_policy_id(self, module, alert_policy_name): - """ - retrieves the alert policy id of the account based on the name of the policy - :param module: the AnsibleModule object - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy_id - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - argument_dict = ClcAlertPolicy._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_alert_policy = ClcAlertPolicy(module) - clc_alert_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py deleted file mode 100644 index 9e0bfa80..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_blueprint_package -short_description: deploys a blue print package on a set of servers in CenturyLink Cloud. -description: - - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to deploy the blue print package. - type: list - required: True - elements: str - package_id: - description: - - The package id of the blue print. - type: str - required: True - package_params: - description: - - The dictionary of arguments required to deploy the blue print. - type: dict - default: {} - required: False - state: - description: - - Whether to install or uninstall the package. Currently it supports only "present" for install action. - type: str - required: False - default: present - choices: ['present'] - wait: - description: - - Whether to wait for the tasks to finish before returning. - type: str - default: 'True' - required: False -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
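The notes above describe the credential sources that every clc_* module in this diff resolves in its _set_clc_credentials_from_env helper: API token plus account alias take precedence over username and password, and CLC_V2_API_URL independently overrides the endpoint. A minimal standalone sketch of that precedence (an illustration, not the shipped helper); note that the code actually reads CLC_V2_API_PASSWD, even though these notes call it CLC_V2_API_PASSWORD:

import os

def resolve_clc_credentials(env=None):
    # Token + alias win over username/password; the endpoint override is
    # independent of either authentication path.
    env = os.environ if env is None else env
    creds = {'endpoint': env.get('CLC_V2_API_URL')}
    if env.get('CLC_V2_API_TOKEN') and env.get('CLC_ACCT_ALIAS'):
        creds.update(token=env['CLC_V2_API_TOKEN'],
                     alias=env['CLC_ACCT_ALIAS'])
    elif env.get('CLC_V2_API_USERNAME') and env.get('CLC_V2_API_PASSWD'):
        creds.update(username=env['CLC_V2_API_USERNAME'],
                     password=env['CLC_V2_API_PASSWD'])
    else:
        raise RuntimeError('You must set the CLC_V2_API_USERNAME and '
                           'CLC_V2_API_PASSWD environment variables')
    return creds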
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Deploy package - community.general.clc_blueprint_package: - server_ids: - - UC1TEST-SERVER1 - - UC1TEST-SERVER2 - package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a - package_params: {} -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SERVER1", - "UC1TEST-SERVER2" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcBlueprintPackage: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - changed = False - changed_server_ids = [] - self._set_clc_credentials_from_env() - server_ids = p['server_ids'] - package_id = p['package_id'] - package_params = p['package_params'] - state = p['state'] - if state == 'present': - changed, changed_server_ids, request_list = self.ensure_package_installed( - server_ids, package_id, package_params) - self._wait_for_requests_to_complete(request_list) - self.module.exit_json(changed=changed, server_ids=changed_server_ids) - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', elements='str', required=True), - package_id=dict(required=True), - package_params=dict(type='dict', default={}), - wait=dict(default=True), # @FIXME should be bool? 
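# The '@FIXME should be bool?' above is justified: left untyped, a playbook
# value such as "False" reaches the module as a non-empty string, which is
# truthy, so `if not self.module.params['wait']` never skips the wait. A
# typed declaration (an illustrative sketch, not the shipped spec) would let
# Ansible coerce yes/no/true/false before the module sees the value:
#     wait=dict(type='bool', default=True),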
- state=dict(default='present', choices=['present']) - ) - return argument_spec - - def ensure_package_installed(self, server_ids, package_id, package_params): - """ - Ensure the package is installed in the given list of servers - :param server_ids: the server list where the package needs to be installed - :param package_id: the blueprint package id - :param package_params: the package arguments - :return: (changed, server_ids, request_list) - changed: A flag indicating if a change was made - server_ids: The list of servers modified - request_list: The list of request objects from clc-sdk - """ - changed = False - request_list = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to get servers from CLC') - for server in servers: - if not self.module.check_mode: - request = self.clc_install_package( - server, - package_id, - package_params) - request_list.append(request) - changed = True - return changed, server_ids, request_list - - def clc_install_package(self, server, package_id, package_params): - """ - Install the package to a given clc server - :param server: The server object where the package needs to be installed - :param package_id: The blue print package id - :param package_params: the required argument dict for the package installation - :return: The result object from the CLC API call - """ - result = None - try: - result = server.ExecutePackage( - package_id=package_id, - parameters=package_params) - except CLCException as ex: - self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format( - package_id, server.id, ex.message - )) - return result - - def _wait_for_requests_to_complete(self, request_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param request_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in request_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process package install request') - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: the list of server ids - :param message: the error message to raise if there is any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = 
"ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcBlueprintPackage.define_argument_spec(), - supports_check_mode=True - ) - clc_blueprint_package = ClcBlueprintPackage(module) - clc_blueprint_package.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py deleted file mode 100644 index f1f4a2f2..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py +++ /dev/null @@ -1,588 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_firewall_policy -short_description: Create/delete/update firewall policies -description: - - Create or delete or update firewall policies on Centurylink Cloud -options: - location: - description: - - Target datacenter for the firewall policy - type: str - required: True - state: - description: - - Whether to create or delete the firewall policy - type: str - default: present - choices: ['present', 'absent'] - source: - description: - - The list of source addresses for traffic on the originating firewall. - This is required when state is 'present' - type: list - elements: str - destination: - description: - - The list of destination addresses for traffic on the terminating firewall. - This is required when state is 'present' - type: list - elements: str - ports: - description: - - The list of ports associated with the policy. - TCP and UDP can take in single ports or port ranges. - - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])." - type: list - elements: str - firewall_policy_id: - description: - - Id of the firewall policy. This is required to update or delete an existing firewall policy - type: str - source_account_alias: - description: - - CLC alias for the source account - type: str - required: True - destination_account_alias: - description: - - CLC alias for the destination account - type: str - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: str - default: 'True' - enabled: - description: - - Whether the firewall policy is enabled or disabled - type: str - choices: ['True', 'False'] - default: 'True' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. 
The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' ---- -- name: Create Firewall Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create / Verify an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: present - source: 10.128.216.0/24 - destination: 10.128.216.0/24 - ports: Any - destination_account_alias: WFAD - -- name: Delete Firewall Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: absent - firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1 -''' - -RETURN = ''' -firewall_policy_id: - description: The fire wall policy id - returned: success - type: str - sample: fc36f1bfd47242e488a9c44346438c05 -firewall_policy: - description: The fire wall policy information - returned: success - type: dict - sample: - { - "destination":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "destinationAccount":"wfad", - "enabled":true, - "id":"fc36f1bfd47242e488a9c44346438c05", - "links":[ - { - "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - } - ], - "ports":[ - "any" - ], - "source":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "status":"active" - } -''' - -__version__ = '${version}' - -import os -import traceback -from ansible.module_utils.six.moves.urllib.parse import urlparse -from time import sleep - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcFirewallPolicy: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.firewall_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - location=dict(required=True), - source_account_alias=dict(required=True), - destination_account_alias=dict(), - firewall_policy_id=dict(), - ports=dict(type='list', elements='str'), - source=dict(type='list', elements='str'), - destination=dict(type='list', 
elements='str'), - wait=dict(default=True), # @FIXME type=bool - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(default=True, choices=[True, False]) - ) - return argument_spec - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - firewall_policy = None - location = self.module.params.get('location') - source_account_alias = self.module.params.get('source_account_alias') - destination_account_alias = self.module.params.get( - 'destination_account_alias') - firewall_policy_id = self.module.params.get('firewall_policy_id') - ports = self.module.params.get('ports') - source = self.module.params.get('source') - destination = self.module.params.get('destination') - wait = self.module.params.get('wait') - state = self.module.params.get('state') - enabled = self.module.params.get('enabled') - - self.firewall_dict = { - 'location': location, - 'source_account_alias': source_account_alias, - 'destination_account_alias': destination_account_alias, - 'firewall_policy_id': firewall_policy_id, - 'ports': ports, - 'source': source, - 'destination': destination, - 'wait': wait, - 'state': state, - 'enabled': enabled} - - self._set_clc_credentials_from_env() - - if state == 'absent': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent( - source_account_alias, location, self.firewall_dict) - - elif state == 'present': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present( - source_account_alias, location, self.firewall_dict) - - return self.module.exit_json( - changed=changed, - firewall_policy_id=firewall_policy_id, - firewall_policy=firewall_policy) - - @staticmethod - def _get_policy_id_from_response(response): - """ - Method to parse out the policy id from creation response - :param response: response from firewall creation API call - :return: policy_id: firewall policy id from creation call - """ - url = response.get('links')[0]['href'] - path = urlparse(url).path - path_list = os.path.split(path) - policy_id = path_list[-1] - return policy_id - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_firewall_policy_is_present( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: (changed, firewall_policy_id, firewall_policy) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was 
created/updated - firewall_policy: The firewall_policy object - """ - firewall_policy = None - firewall_policy_id = firewall_dict.get('firewall_policy_id') - - if firewall_policy_id is None: - if not self.module.check_mode: - response = self._create_firewall_policy( - source_account_alias, - location, - firewall_dict) - firewall_policy_id = self._get_policy_id_from_response( - response) - changed = True - else: - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if not firewall_policy: - return self.module.fail_json( - msg='Unable to find the firewall policy id : {0}'.format( - firewall_policy_id)) - changed = self._compare_get_request_with_dict( - firewall_policy, - firewall_dict) - if not self.module.check_mode and changed: - self._update_firewall_policy( - source_account_alias, - location, - firewall_policy_id, - firewall_dict) - if changed and firewall_policy_id: - firewall_policy = self._wait_for_requests_to_complete( - source_account_alias, - location, - firewall_policy_id) - return changed, firewall_policy_id, firewall_policy - - def _ensure_firewall_policy_is_absent( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is removed if present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: firewall policy to delete - :return: (changed, firewall_policy_id, response) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was deleted - response: response from CLC API call - """ - changed = False - response = [] - firewall_policy_id = firewall_dict.get('firewall_policy_id') - result = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if result: - if not self.module.check_mode: - response = self._delete_firewall_policy( - source_account_alias, - location, - firewall_policy_id) - changed = True - return changed, firewall_policy_id, response - - def _create_firewall_policy( - self, - source_account_alias, - location, - firewall_dict): - """ - Creates the firewall policy for the given account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response from CLC API call - """ - payload = { - 'destinationAccount': firewall_dict.get('destination_account_alias'), - 'source': firewall_dict.get('source'), - 'destination': firewall_dict.get('destination'), - 'ports': firewall_dict.get('ports')} - try: - response = self.clc.v2.API.Call( - 'POST', '/v2-experimental/firewallPolicies/%s/%s' % - (source_account_alias, location), payload) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to create firewall policy. 
%s" % - str(e.response_text)) - return response - - def _delete_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Deletes a given firewall policy for an account alias in a datacenter - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to delete - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to delete the firewall policy id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _update_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id, - firewall_dict): - """ - Updates a firewall policy for a given datacenter and account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to update - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'PUT', - '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, - location, - firewall_policy_id), - firewall_dict) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to update the firewall policy id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - @staticmethod - def _compare_get_request_with_dict(response, firewall_dict): - """ - Helper method to compare the json response for getting the firewall policy with the request parameters - :param response: response from the get method - :param firewall_dict: dictionary of request parameters for firewall policy - :return: changed: Boolean that returns true if there are differences between - the response parameters and the playbook parameters - """ - - changed = False - - response_dest_account_alias = response.get('destinationAccount') - response_enabled = response.get('enabled') - response_source = response.get('source') - response_dest = response.get('destination') - response_ports = response.get('ports') - request_dest_account_alias = firewall_dict.get( - 'destination_account_alias') - request_enabled = firewall_dict.get('enabled') - if request_enabled is None: - request_enabled = True - request_source = firewall_dict.get('source') - request_dest = firewall_dict.get('destination') - request_ports = firewall_dict.get('ports') - - if ( - response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( - response_enabled != request_enabled) or ( - response_source and response_source != request_source) or ( - response_dest and response_dest != request_dest) or ( - response_ports and response_ports != request_ports): - changed = True - return changed - - def _get_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Get back details for a particular firewall policy - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: id of the firewall policy to get - :return: response - The response from CLC API call - """ - response = None - 
try: - response = self.clc.v2.API.Call( - 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - if e.response_status_code != 404: - self.module.fail_json( - msg="Unable to fetch the firewall policy with id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _wait_for_requests_to_complete( - self, - source_account_alias, - location, - firewall_policy_id, - wait_limit=50): - """ - Waits until the CLC requests are complete if the wait argument is True - :param source_account_alias: The source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: The firewall policy id - :param wait_limit: The number of times to check the status for completion - :return: the firewall_policy object - """ - wait = self.module.params.get('wait') - count = 0 - firewall_policy = None - while wait: - count += 1 - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - status = firewall_policy.get('status') - if status == 'active' or count > wait_limit: - wait = False - else: - # wait for 2 seconds - sleep(2) - return firewall_policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcFirewallPolicy._define_module_argument_spec(), - supports_check_mode=True) - - clc_firewall = ClcFirewallPolicy(module) - clc_firewall.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py deleted file mode 100644 index 312c6269..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py +++ /dev/null @@ -1,514 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_group -short_description: Create/delete Server Groups at Centurylink Cloud -description: - - Create or delete Server Groups at Centurylink Centurylink Cloud -options: - name: - description: - - The name of the Server Group - type: str - required: True - description: - description: - - A description of the Server Group - type: str - required: False - parent: - description: - - The parent group of the server group. If parent is not provided, it creates the group at top level. - type: str - required: False - location: - description: - - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter - associated with the account - type: str - required: False - state: - description: - - Whether to create or delete the group - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
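The firewall module's _wait_for_requests_to_complete above is a bounded poll: fetch the policy, stop when its status is 'active', otherwise sleep two seconds, giving up after wait_limit attempts (50 by default, roughly 100 seconds). A minimal standalone sketch of that loop (fetch_policy is a hypothetical callable standing in for _get_firewall_policy):

from time import sleep

def wait_until_active(fetch_policy, wait_limit=50, interval=2):
    # Poll until the resource reports 'active' or the attempt budget runs out.
    policy = None
    for _ in range(wait_limit):
        policy = fetch_policy()
        if policy.get('status') == 'active':
            break
        sleep(interval)
    return policy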
- type: bool - default: True - required: False -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' - -# Create a Server Group - ---- -- name: Create Server Group - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create / Verify a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -# Delete a Server Group -- name: Delete Server Group - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete / Verify Absent a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -''' - -RETURN = ''' -group: - description: The group information - returned: success - type: dict - sample: - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":"2015-07-29T18:52:47Z", - "modifiedBy":"service.wfad", - "modifiedDate":"2015-07-29T18:52:47Z" - }, - "customFields":[ - - ], - "description":"test group", - "groups":[ - - ], - "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1", - "links":[ - { - "href":"/v2/groups/wfad", - "rel":"createGroup", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad", - "rel":"createServer", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"parentGroup" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults", - "rel":"defaults", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing", - "rel":"billing" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive", - "rel":"archiveGroupAction" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics", - "rel":"statistics" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy", - "rel":"horizontalAutoscalePolicyMapping", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - } - ], - "locationId":"UC1", - "name":"test group", - "status":"active", - "type":"default" - } -''' - -__version__ = '${version}' - 
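The RETURN sample above shows the CLC API's HATEOAS-style 'links' array, where relations such as 'self', 'parentGroup', and 'billing' each carry an href. A small helper for picking one out (an editor's sketch; the module itself only ever reads links[0]):

def link_by_rel(resource, rel):
    # Return the first href whose relation matches, or None if absent.
    return next((link['href'] for link in resource.get('links', [])
                 if link.get('rel') == rel), None)

# e.g. link_by_rel(group, 'parentGroup')
# -> '/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0'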
-import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcGroup(object): - - clc = None - root_group = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - location = self.module.params.get('location') - group_name = self.module.params.get('name') - parent_name = self.module.params.get('parent') - group_description = self.module.params.get('description') - state = self.module.params.get('state') - - self._set_clc_credentials_from_env() - self.group_dict = self._get_group_tree_for_datacenter( - datacenter=location) - - if state == "absent": - changed, group, requests = self._ensure_group_is_absent( - group_name=group_name, parent_name=parent_name) - if requests: - self._wait_for_requests_to_complete(requests) - else: - changed, group = self._ensure_group_is_present( - group_name=group_name, parent_name=parent_name, group_description=group_description) - try: - group = group.data - except AttributeError: - group = group_name - self.module.exit_json(changed=changed, group=group) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(default=None), - parent=dict(default=None), - location=dict(default=None), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=True)) - - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def 
_ensure_group_is_absent(self, group_name, parent_name): - """ - Ensure that group_name is absent by deleting it if necessary - :param group_name: string - the name of the clc server group to delete - :param parent_name: string - the name of the parent group for group_name - :return: changed, group - """ - changed = False - group = [] - results = [] - - if self._group_exists(group_name=group_name, parent_name=parent_name): - if not self.module.check_mode: - group.append(group_name) - result = self._delete_group(group_name) - results.append(result) - changed = True - return changed, group, results - - def _delete_group(self, group_name): - """ - Delete the provided server group - :param group_name: string - the server group to delete - :return: none - """ - response = None - group, parent = self.group_dict.get(group_name) - try: - response = group.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format( - group_name, ex.response_text - )) - return response - - def _ensure_group_is_present( - self, - group_name, - parent_name, - group_description): - """ - Checks to see if a server group exists, creates it if it doesn't. - :param group_name: the name of the group to validate/create - :param parent_name: the name of the parent group for group_name - :param group_description: a short description of the server group (used when creating) - :return: (changed, group) - - changed: Boolean- whether a change was made, - group: A clc group object for the group - """ - if not self.root_group: - raise AssertionError("Implementation Error: Root Group not set") - parent = parent_name if parent_name is not None else self.root_group.name - description = group_description - changed = False - group = group_name - - parent_exists = self._group_exists(group_name=parent, parent_name=None) - child_exists = self._group_exists( - group_name=group_name, - parent_name=parent) - - if parent_exists and child_exists: - group, parent = self.group_dict[group_name] - changed = False - elif parent_exists and not child_exists: - if not self.module.check_mode: - group = self._create_group( - group=group, - parent=parent, - description=description) - changed = True - else: - self.module.fail_json( - msg="parent group: " + - parent + - " does not exist") - - return changed, group - - def _create_group(self, group, parent, description): - """ - Create the provided server group - :param group: clc_sdk.Group - the group to create - :param parent: clc_sdk.Parent - the parent group for {group} - :param description: string - a text description of the group - :return: clc_sdk.Group - the created group - """ - response = None - (parent, grandparent) = self.group_dict[parent] - try: - response = parent.Create(name=group, description=description) - except CLCException as ex: - self.module.fail_json(msg='Failed to create group :{0}. 
{1}'.format( - group, ex.response_text)) - return response - - def _group_exists(self, group_name, parent_name): - """ - Check to see if a group exists - :param group_name: string - the group to check - :param parent_name: string - the parent of group_name - :return: boolean - whether the group exists - """ - result = False - if group_name in self.group_dict: - (group, parent) = self.group_dict[group_name] - if parent_name is None or parent_name == parent.name: - result = True - return result - - def _get_group_tree_for_datacenter(self, datacenter=None): - """ - Walk the tree of groups for a datacenter - :param datacenter: string - the datacenter to walk (ex: 'UC1') - :return: a dictionary of groups and parents - """ - self.root_group = self.clc.v2.Datacenter( - location=datacenter).RootGroup() - return self._walk_groups_recursive( - parent_group=None, - child_group=self.root_group) - - def _walk_groups_recursive(self, parent_group, child_group): - """ - Walk a parent-child tree of groups, starting with the provided child group - :param parent_group: clc_sdk.Group - the parent group to start the walk - :param child_group: clc_sdk.Group - the child group to start the walk - :return: a dictionary of groups and parents - """ - result = {str(child_group): (child_group, parent_group)} - groups = child_group.Subgroups().groups - if len(groups) > 0: - for group in groups: - if group.type != 'default': - continue - - result.update(self._walk_groups_recursive(child_group, group)) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process group request') - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcGroup._define_module_argument_spec(), - supports_check_mode=True) - - clc_group = ClcGroup(module) - clc_group.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py deleted file mode 100644 index 656f4363..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py +++ /dev/null @@ -1,937 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_loadbalancer -short_description: Create, Delete shared loadbalancers in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud. 
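Before the load balancer options: the recursive walk in clc_group's _walk_groups_recursive above is the heart of that module. It flattens the datacenter's group hierarchy into a name-indexed dict of (group, parent) pairs, skipping non-'default' groups, which also means groups with duplicate names in different branches can silently overwrite one another. A standalone sketch (get_children is a hypothetical callable standing in for clc-sdk's Subgroups().groups):

def walk_groups(parent, child, get_children):
    # Index by str(group), which for clc-sdk groups is the group name.
    result = {str(child): (child, parent)}
    for group in get_children(child):
        if getattr(group, 'type', 'default') != 'default':
            continue
        result.update(walk_groups(child, group, get_children))
    return result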
-options:
-  name:
-    description:
-      - The name of the loadbalancer
-    type: str
-    required: True
-  description:
-    description:
-      - A description for the loadbalancer
-    type: str
-  alias:
-    description:
-      - The alias of your CLC Account
-    type: str
-    required: True
-  location:
-    description:
-      - The location of the datacenter where the load balancer resides
-    type: str
-    required: True
-  method:
-    description:
-      - The balancing method for the load balancer pool
-    type: str
-    choices: ['leastConnection', 'roundRobin']
-  persistence:
-    description:
-      - The persistence method for the load balancer
-    type: str
-    choices: ['standard', 'sticky']
-  port:
-    description:
-      - Port to configure on the public-facing side of the load balancer pool
-    type: str
-    choices: ['80', '443']
-  nodes:
-    description:
-      - A list of nodes that need to be added to the load balancer pool
-    type: list
-    default: []
-    elements: dict
-  status:
-    description:
-      - The status of the loadbalancer
-    type: str
-    default: enabled
-    choices: ['enabled', 'disabled']
-  state:
-    description:
-      - Whether to create or delete the load balancer pool
-    type: str
-    default: present
-    choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
-requirements:
-    - python = 2.7
-    - requests >= 2.5.0
-    - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-    - To use this module, it is required to set the below environment variables which enables access to the
-      Centurylink Cloud
-          - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-          - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
-    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
-      CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
-          - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
-          - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
-    - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
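The five states above fan out one-to-one onto ensure_* handlers in the module's process_request further down. Sketched as a dispatch table (the method names follow the module; the wiring itself is illustrative, since the real code passes each handler different keyword arguments):

def pick_handler(lb, state):
    # lb is an instance of the module's ClcLoadBalancer class.
    handlers = {
        'present': lb.ensure_loadbalancer_present,
        'absent': lb.ensure_loadbalancer_absent,
        'port_absent': lb.ensure_loadbalancerpool_absent,
        'nodes_present': lb.ensure_lbpool_nodes_present,
        'nodes_absent': lb.ensure_lbpool_nodes_absent,
    }
    return handlers[state]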
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples -- name: Create Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: present - -- name: Add node to an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_present - -- name: Remove node from an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_absent - -- name: Delete LoadbalancerPool - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: port_absent - -- name: Delete Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: absent -''' - -RETURN = ''' -loadbalancer: - description: The load balancer result object from CLC - returned: success - type: dict - sample: - { - "description":"test-lb", - "id":"ab5b18cb81e94ab9925b61d1ca043fb5", - "ipAddress":"66.150.174.197", - "links":[ - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools", - "rel":"pools", - "verbs":[ - "GET", - "POST" - ] - } - ], - "name":"test-lb", - "pools":[ - - ], - "status":"enabled" - } -''' - -__version__ = '${version}' - -import json -import os -import traceback -from time import sleep - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcLoadBalancer: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.lb_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - result_lb = None - loadbalancer_name = self.module.params.get('name') - loadbalancer_alias = self.module.params.get('alias') - loadbalancer_location = self.module.params.get('location') - loadbalancer_description = self.module.params.get('description') - loadbalancer_port = self.module.params.get('port') - loadbalancer_method = self.module.params.get('method') - loadbalancer_persistence = self.module.params.get('persistence') - loadbalancer_nodes = self.module.params.get('nodes') - loadbalancer_status = self.module.params.get('status') - state = self.module.params.get('state') - - if loadbalancer_description is None: - loadbalancer_description = loadbalancer_name - - self._set_clc_credentials_from_env() - - self.lb_dict = self._get_loadbalancer_list( - alias=loadbalancer_alias, - location=loadbalancer_location) - - if state == 'present': - changed, result_lb, lb_id = self.ensure_loadbalancer_present( - name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location, - description=loadbalancer_description, - status=loadbalancer_status) - if loadbalancer_port: - changed, result_pool, pool_id = self.ensure_loadbalancerpool_present( - lb_id=lb_id, - alias=loadbalancer_alias, - location=loadbalancer_location, - method=loadbalancer_method, - persistence=loadbalancer_persistence, - port=loadbalancer_port) - - if loadbalancer_nodes: - changed, result_nodes = self.ensure_lbpool_nodes_set( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - elif state == 'absent': - changed, result_lb = self.ensure_loadbalancer_absent( - name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location) - - elif state == 'port_absent': - changed, result_lb = self.ensure_loadbalancerpool_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port) - - elif state == 'nodes_present': - changed, result_lb = self.ensure_lbpool_nodes_present( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - elif state == 'nodes_absent': - changed, result_lb = self.ensure_lbpool_nodes_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - self.module.exit_json(changed=changed, loadbalancer=result_lb) - - def 
ensure_loadbalancer_present( - self, name, alias, location, description, status): - """ - Checks to see if a load balancer exists and creates one if it does not. - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description of loadbalancer - :param status: Enabled / Disabled - :return: (changed, result, lb_id) - changed: Boolean whether a change was made - result: The result object from the CLC load balancer request - lb_id: The load balancer id - """ - changed = False - result = name - lb_id = self._loadbalancer_exists(name=name) - if not lb_id: - if not self.module.check_mode: - result = self.create_loadbalancer(name=name, - alias=alias, - location=location, - description=description, - status=status) - lb_id = result.get('id') - changed = True - - return changed, result, lb_id - - def ensure_loadbalancerpool_present( - self, lb_id, alias, location, method, persistence, port): - """ - Checks to see if a load balancer pool exists and creates one if it does not. - :param lb_id: The loadbalancer id - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: (changed, group, pool_id) - - changed: Boolean whether a change was made - result: The result from the CLC API call - pool_id: The string id of the load balancer pool - """ - changed = False - result = port - if not lb_id: - return changed, None, None - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if not pool_id: - if not self.module.check_mode: - result = self.create_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - method=method, - persistence=persistence, - port=port) - pool_id = result.get('id') - changed = True - - return changed, result, pool_id - - def ensure_loadbalancer_absent(self, name, alias, location): - """ - Checks to see if a load balancer exists and deletes it if it does - :param name: Name of the load balancer - :param alias: Alias of account - :param location: Datacenter - :return: (changed, result) - changed: Boolean whether a change was made - result: The result from the CLC API Call - """ - changed = False - result = name - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - if not self.module.check_mode: - result = self.delete_loadbalancer(alias=alias, - location=location, - name=name) - changed = True - return changed, result - - def ensure_loadbalancerpool_absent(self, alias, location, name, port): - """ - Checks to see if a load balancer pool exists and deletes it if it does - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer listens on - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = None - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed = True - if not self.module.check_mode: - result = self.delete_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id) - else: - result = "Pool 
doesn't exist" - else: - result = "LB doesn't exist" - return changed, result - - def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exists for the pool - and sets the pool's nodes to the provided list if any of them are missing - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: The list of nodes to be set on the pool - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - result = {} - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_check=nodes) - if not nodes_exist: - changed = True - result = self.set_loadbalancernodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't exist" - return changed, result - - def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exists for the pool and adds any missing nodes to the pool - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: the list of nodes to be added - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed, result = self.add_lbpool_nodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_add=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't exist" - return changed, result - - def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exists for the pool and removes any that are found - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: the list of nodes to be removed - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed, result = self.remove_lbpool_nodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_remove=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't exist" - return changed, result - - def create_loadbalancer(self, name, alias, location, description,
status): - """ - Create a loadbalancer w/ params - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description for loadbalancer to be created - :param status: Enabled / Disabled - :return: result: The result from the CLC API call - """ - result = None - try: - result = self.clc.v2.API.Call('POST', - '/v2/sharedLoadBalancers/%s/%s' % (alias, - location), - json.dumps({"name": name, - "description": description, - "status": status})) - sleep(1) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create load balancer "{0}". {1}'.format( - name, str(e.response_text))) - return result - - def create_loadbalancerpool( - self, alias, location, lb_id, method, persistence, port): - """ - Creates a pool on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: result: The result from the create API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id), json.dumps( - { - "port": port, "method": method, "persistence": persistence - })) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create pool for load balancer id "{0}". {1}'.format( - lb_id, str(e.response_text))) - return result - - def delete_loadbalancer(self, alias, location, name): - """ - Delete CLC loadbalancer - :param alias: Alias for account - :param location: Datacenter - :param name: Name of the loadbalancer to delete - :return: result: The result from the CLC API call - """ - result = None - lb_id = self._get_loadbalancer_id(name=name) - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' % - (alias, location, lb_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete load balancer "{0}". {1}'.format( - name, str(e.response_text))) - return result - - def delete_loadbalancerpool(self, alias, location, lb_id, pool_id): - """ - Delete the pool on the provided load balancer - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the load balancer pool - :return: result: The result from the delete API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' % - (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete pool for load balancer id "{0}". 
{1}'.format( - lb_id, str(e.response_text))) - return result - - def _get_loadbalancer_id(self, name): - """ - Retrieves unique ID of loadbalancer - :param name: Name of loadbalancer - :return: Unique ID of the loadbalancer - """ - id = None - for lb in self.lb_dict: - if lb.get('name') == name: - id = lb.get('id') - return id - - def _get_loadbalancer_list(self, alias, location): - """ - Retrieve a list of loadbalancers - :param alias: Alias for account - :param location: Datacenter - :return: JSON data for all loadbalancers at datacenter - """ - result = None - try: - result = self.clc.v2.API.Call( - 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch load balancers for account: {0}. {1}'.format( - alias, str(e.response_text))) - return result - - def _loadbalancer_exists(self, name): - """ - Verify a loadbalancer exists - :param name: Name of loadbalancer - :return: False or the ID of the existing loadbalancer - """ - result = False - - for lb in self.lb_dict: - if lb.get('name') == name: - result = lb.get('id') - return result - - def _loadbalancerpool_exists(self, alias, location, port, lb_id): - """ - Checks to see if a pool exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param port: the port to check for an existing pool - :param lb_id: the id string of the provided load balancer - :return: result: The id string of the pool or False - """ - result = False - try: - pool_list = self.clc.v2.API.Call( - 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format( - lb_id, str(e.response_text)))
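# A pool is identified by its listening port: the loop below returns the id
# of the first pool whose port matches, so the module assumes at most one
# pool per port on a given shared load balancer.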
- for pool in pool_list: - if int(pool.get('port')) == int(port): - result = pool.get('id') - return result - - def _loadbalancerpool_nodes_exists( - self, alias, location, lb_id, pool_id, nodes_to_check): - """ - Checks to see if a set of nodes exists in the given pool on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the provided load balancer - :param pool_id: the id string of the load balancer pool - :param nodes_to_check: the list of nodes to check for - :return: result: True / False indicating if all of the given nodes exist - """ - result = False - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_check: - if not node.get('status'): - node['status'] = 'enabled' - # every node in the list must be present for the check to pass - if node not in nodes: - result = False - break - result = True - return result - - def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes): - """ - Sets the nodes on the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes: a list of dictionaries containing the nodes to set - :return: result: The result from the CLC API call - """ - result = None - if not lb_id: - return result - if not self.module.check_mode: - try: - result = self.clc.v2.API.Call('PUT', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id), json.dumps(nodes)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format( - pool_id, str(e.response_text))) - return result - - def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add): - """ - Add nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_add: a list of dictionaries containing the nodes to add - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_add: - if not node.get('status'): - node['status'] = 'enabled' - if node not in nodes: - changed = True - nodes.append(node) - if changed is True and not self.module.check_mode: - result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def remove_lbpool_nodes( - self, alias, location, lb_id, pool_id, nodes_to_remove): - """ - Removes nodes from the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_remove: a list of dictionaries containing the nodes to remove - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_remove: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - changed = True - nodes.remove(node) - if changed is True and not self.module.check_mode:
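# set_loadbalancernodes() pushes the complete node list in a single PUT,
# so the desired list is edited locally above and then replaces the
# pool's nodes wholesale in the call below.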
result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def _get_lbpool_nodes(self, alias, location, lb_id, pool_id): - """ - Return the list of nodes available to the provided load balancer pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :return: result: The list of nodes - """ - result = None - try: - result = self.clc.v2.API.Call('GET', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format( - pool_id, str(e.response_text))) - return result - - @staticmethod - def define_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(default=None), - location=dict(required=True), - alias=dict(required=True), - port=dict(choices=[80, 443]), - method=dict(choices=['leastConnection', 'roundRobin']), - persistence=dict(choices=['standard', 'sticky']), - nodes=dict(type='list', default=[], elements='dict'), - status=dict(default='enabled', choices=['enabled', 'disabled']), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'port_absent', - 'nodes_present', - 'nodes_absent']) - ) - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(), - supports_check_mode=True) - clc_loadbalancer = ClcLoadBalancer(module) - clc_loadbalancer.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py deleted file mode 100644 index 27cdf614..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py +++ /dev/null @@ -1,967 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_modify_server -short_description: modify servers in CenturyLink Cloud. -description: - - An Ansible module to modify servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to modify. - type: list - required: True - elements: str - cpu: - description: - - How many CPUs to update on the server - type: str - memory: - description: - - Memory (in GB) to set to the server. - type: str - anti_affinity_policy_id: - description: - - The anti affinity policy id to be set for a hyper scale server. - This is mutually exclusive with 'anti_affinity_policy_name' - type: str - anti_affinity_policy_name: - description: - - The anti affinity policy name to be set for a hyper scale server. - This is mutually exclusive with 'anti_affinity_policy_id' - type: str - alert_policy_id: - description: - - The alert policy id to be associated to the server. - This is mutually exclusive with 'alert_policy_name' - type: str - alert_policy_name: - description: - - The alert policy name to be associated to the server. - This is mutually exclusive with 'alert_policy_id' - type: str - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Set the cpu count to 4 on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 4 - state: present - -- name: Set the memory to 8GB on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - memory: 8 - state: present - -- name: Set the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: present - -- name: Remove the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: absent - -- name: Add the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: present - -- name: Remove the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: absent - -- name: Set the memory to 16GB and cpu to 8 cores on a list of servers - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 8 - memory: 16 - state: present -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -servers: - description: The list of server objects that are changed - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - "disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - "storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST"
- ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - "rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -''' - -__version__ = '${version}' - -import json -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcModifyServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - - p = self.module.params - cpu = p.get('cpu') - memory = p.get('memory') - state = p.get('state') - if state == 'absent' and (cpu or memory): - return self.module.fail_json( - msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments') - - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to modify: %s' % - server_ids) - - (changed, server_dict_array, changed_server_ids) = self._modify_servers( - server_ids=server_ids) - - self.module.exit_json( - changed=changed, - server_ids=changed_server_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - state=dict(default='present', choices=['present', 'absent']), - cpu=dict(), - memory=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - alert_policy_id=dict(), - alert_policy_name=dict(), - wait=dict(type='bool', default=True) - ) - mutually_exclusive = [ - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'] - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def 
_set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: the error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex.message) - - def _modify_servers(self, server_ids): - """ - modify the servers configuration on the provided list - :param server_ids: list of servers to modify - :return: a list of dictionaries with server information about the servers that were modified - """ - p = self.module.params - state = p.get('state') - server_params = { - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), - 'alert_policy_id': p.get('alert_policy_id'), - 'alert_policy_name': p.get('alert_policy_name'), - } - changed = False - server_changed = False - aa_changed = False - ap_changed = False - server_dict_array = [] - result_server_ids = [] - request_list = [] - changed_servers = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return self.module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - for server in servers: - if state == 'present': - server_changed, server_result = self._ensure_server_config( - server, server_params) - if server_result: - request_list.append(server_result) - aa_changed = self._ensure_aa_policy_present( - server, - server_params) - ap_changed = self._ensure_alert_policy_present( - server, - server_params) - elif state == 'absent': - aa_changed = self._ensure_aa_policy_absent( - server, - server_params) - ap_changed = self._ensure_alert_policy_absent( - server, - server_params) - if server_changed or aa_changed or ap_changed: - changed_servers.append(server) - changed = True - - self._wait_for_requests(self.module, request_list) - self._refresh_servers(self.module, changed_servers) - - for server in changed_servers: - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - def _ensure_server_config( - self, server, server_params): - """ - ensures the server is updated with the provided cpu and memory - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: 
Boolean whether a change was made - result: The result from the CLC API call - """ - cpu = server_params.get('cpu') - memory = server_params.get('memory') - changed = False - result = None - - if not cpu: - cpu = server.cpu - if not memory: - memory = server.memory - if memory != server.memory or cpu != server.cpu: - if not self.module.check_mode: - result = self._modify_clc_server( - self.clc, - self.module, - server.id, - cpu, - memory) - changed = True - return changed, result - - @staticmethod - def _modify_clc_server(clc, module, server_id, cpu, memory): - """ - Modify the memory or CPU of a clc server. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param server_id: id of the server to modify - :param cpu: the new cpu value - :param memory: the new memory value - :return: the result of CLC API call - """ - result = None - acct_alias = clc.v2.Account.GetAlias() - try: - # Update the server configuration - job_obj = clc.v2.API.Call('PATCH', - 'servers/%s/%s' % (acct_alias, - server_id), - json.dumps([{"op": "set", - "member": "memory", - "value": memory}, - {"op": "set", - "member": "cpu", - "value": cpu}])) - result = clc.v2.Requests(job_obj) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to update the server configuration for server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning requests are completed. - :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process modify server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. 
{1}'.format( - server.id, ex.message - )) - - def _ensure_aa_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided anti affinity policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id != current_aa_policy_id: - self._modify_aa_policy( - self.clc, - self.module, - acct_alias, - server.id, - aa_policy_id) - changed = True - return changed - - def _ensure_aa_policy_absent( - self, server, server_params): - """ - ensures the provided anti affinity policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id == current_aa_policy_id: - self._delete_aa_policy( - self.clc, - self.module, - acct_alias, - server.id) - changed = True - return changed - - @staticmethod - def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id): - """ - modifies the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param aa_policy_id: the anti affinity policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('PUT', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({"id": aa_policy_id})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _delete_aa_policy(clc, module, acct_alias, server_id): - """ - Delete the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to delete anti affinity policy to server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json( - msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format( - alias, str(ex.response_text))) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _get_aa_policy_id_of_server(clc, module, alias, server_id): - """ - retrieves the anti affinity policy id of the server based on the CLC server id - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param server_id: the CLC server id - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - result = clc.v2.API.Call( - method='GET', url='servers/%s/%s/antiAffinityPolicy' % - (alias, server_id)) - aa_policy_id = result.get('id') - except APIFailedResponse as ex: - if ex.response_status_code != 404: - module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return aa_policy_id - - def _ensure_alert_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided alert policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - if alert_policy_id and not self._alert_policy_exists( - server, alert_policy_id): - self._add_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - def _ensure_alert_policy_absent( - self, server, server_params): - """ - ensures the alert policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - - if alert_policy_id and self._alert_policy_exists( - server, alert_policy_id): - self._remove_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - @staticmethod - def _add_alert_policy_to_server( - clc, module, acct_alias, server_id, alert_policy_id): - """ - add the alert policy to CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('POST', - 'servers/%s/%s/alertPolicies' % ( - acct_alias, - server_id), - json.dumps({"id": alert_policy_id})) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _remove_alert_policy_to_server( - clc, module, acct_alias, server_id, alert_policy_id): - """ - remove the alert policy to the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/alertPolicies/%s' - % (acct_alias, server_id, alert_policy_id)) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to remove alert policy from the server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - retrieves the alert policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - try: - alert_policies = clc.v2.API.Call(method='GET', - url='alertPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format( - alias, str(ex.response_text))) - for alert_policy in alert_policies.get('items'): - if alert_policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = alert_policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _alert_policy_exists(server, alert_policy_id): - """ - Checks if the alert policy exists for the server - :param server: the clc server object - :param alert_policy_id: the alert policy - :return: True: if the given alert policy id associated to the server, False otherwise - """ - result = False - alert_policies = server.alertPolicies - if alert_policies: - for alert_policy in alert_policies: - if alert_policy.get('id') == alert_policy_id: - result = True - return result - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - - argument_dict = ClcModifyServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_modify_server = ClcModifyServer(module) - clc_modify_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py deleted file mode 100644 index 3b4fcc4e..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py +++ /dev/null @@ -1,361 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_publicip -short_description: Add and Delete public ips on servers in CenturyLink Cloud. -description: - - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud. -options: - protocol: - description: - - The protocol that the public IP will listen for. - type: str - default: TCP - choices: ['TCP', 'UDP', 'ICMP'] - ports: - description: - - A list of ports to expose. This is required when state is 'present' - type: list - elements: int - server_ids: - description: - - A list of servers to create public ips on. 
- type: list - required: True - elements: str - state: - description: - - Determine whether to create or delete public IPs. If present module will not create a second public ip if one - already exists. - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. - type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Add Public IP to Server - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - protocol: TCP - ports: - - 80 - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -- name: Delete Public IP from Server - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
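# The import guards just below follow the standard Ansible pattern for
# optional libraries: the ImportError traceback is captured at import time
# and surfaced later through missing_required_lib() when the module is
# constructed, so users get an actionable message rather than a raw import
# failure if clc-sdk (or requests) is missing.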
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcPublicIp(object): - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - params = self.module.params - server_ids = params['server_ids'] - ports = params['ports'] - protocol = params['protocol'] - state = params['state'] - - if state == 'present': - changed, changed_server_ids, requests = self.ensure_public_ip_present( - server_ids=server_ids, protocol=protocol, ports=ports) - elif state == 'absent': - changed, changed_server_ids, requests = self.ensure_public_ip_absent( - server_ids=server_ids) - else: - return self.module.fail_json(msg="Unknown State: " + state) - self._wait_for_requests_to_complete(requests) - return self.module.exit_json(changed=changed, - server_ids=changed_server_ids) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), - ports=dict(type='list', elements='int'), - wait=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - def ensure_public_ip_present(self, server_ids, protocol, ports): - """ - Ensures the given server ids having the public ip available - :param server_ids: the list of server ids - :param protocol: the ip protocol - :param ports: the list of ports to expose - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids : the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) == 0] - ports_to_expose = [{'protocol': protocol, 'port': port} - for port in ports] - for server in servers_to_change: - if not self.module.check_mode: - result = self._add_publicip_to_server(server, ports_to_expose) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _add_publicip_to_server(self, server, ports_to_expose): - result = None - try: - result = server.PublicIPs().Add(ports_to_expose) - except CLCException as ex: - self.module.fail_json(msg='Failed to add public ip to the server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_public_ip_absent(self, server_ids): - """ - Ensures the given server ids have their public ips removed, if any exist - :param server_ids: the list of server ids - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids: the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) > 0] - for server in servers_to_change: - if not self.module.check_mode: - result = self._remove_publicip_from_server(server) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _remove_publicip_from_server(self, server): - result = None - try: - for ip_address in server.PublicIPs().public_ips: - result = ip_address.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process public ip request') - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_servers_from_clc(self, server_ids, message): - """ - Gets the list of servers from the CLC API - """ - try: - return self.clc.v2.Servers(server_ids).servers - except CLCException as exception: - self.module.fail_json(msg=message + ': %s' % exception) - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request.
- :return: none - """ - module = AnsibleModule( - argument_spec=ClcPublicIp._define_module_argument_spec(), - supports_check_mode=True - ) - clc_public_ip = ClcPublicIp(module) - clc_public_ip.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py deleted file mode 100644 index 73403987..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py +++ /dev/null @@ -1,1563 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_server -short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. -options: - additional_disks: - description: - - The list of additional disks for the server - type: list - elements: dict - default: [] - add_public_ip: - description: - - Whether to add a public ip to the server - type: bool - default: 'no' - alias: - description: - - The account alias to provision the servers under. - type: str - anti_affinity_policy_id: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'. - type: str - anti_affinity_policy_name: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'. - type: str - alert_policy_id: - description: - - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'. - type: str - alert_policy_name: - description: - - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'. - type: str - count: - description: - - The number of servers to build (mutually exclusive with exact_count) - default: 1 - type: int - count_group: - description: - - Required when exact_count is specified. The Server Group use to determine how many servers to deploy. - type: str - cpu: - description: - - How many CPUs to provision on the server - default: 1 - type: int - cpu_autoscale_policy_id: - description: - - The autoscale policy to assign to the server. - type: str - custom_fields: - description: - - The list of custom fields to set on the server. - type: list - default: [] - elements: dict - description: - description: - - The description to set for the server. - type: str - exact_count: - description: - - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, - creating and deleting them to reach that count. Requires count_group to be set. - type: int - group: - description: - - The Server Group to create servers under. - type: str - default: 'Default Group' - ip_address: - description: - - The IP Address for the server. One is assigned if not provided. - type: str - location: - description: - - The Datacenter to create servers in. - type: str - managed_os: - description: - - Whether to create the server as 'Managed' or not. - type: bool - default: 'no' - required: False - memory: - description: - - Memory in GB. - type: int - default: 1 - name: - description: - - A 1 to 6 character identifier to use for the server. 
This is required when state is 'present' - type: str - network_id: - description: - - The network UUID on which to create servers. - type: str - packages: - description: - - The list of blue print packages to run on the server after its created. - type: list - elements: dict - default: [] - password: - description: - - Password for the administrator / root user - type: str - primary_dns: - description: - - Primary DNS used by the server. - type: str - public_ip_protocol: - description: - - The protocol to use for the public ip if add_public_ip is set to True. - type: str - default: 'TCP' - choices: ['TCP', 'UDP', 'ICMP'] - public_ip_ports: - description: - - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True. - type: list - elements: dict - default: [] - secondary_dns: - description: - - Secondary DNS used by the server. - type: str - server_ids: - description: - - Required for started, stopped, and absent states. - A list of server Ids to insure are started, stopped, or absent. - type: list - default: [] - elements: str - source_server_password: - description: - - The password for the source server if a clone is specified. - type: str - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent', 'started', 'stopped'] - storage_type: - description: - - The type of storage to attach to the server. - type: str - default: 'standard' - choices: ['standard', 'hyperscale'] - template: - description: - - The template to use for server creation. Will search for a template if a partial string is provided. - This is required when state is 'present' - type: str - ttl: - description: - - The time to live for the server in seconds. The server will be deleted when this time expires. - type: str - type: - description: - - The type of server to create. - type: str - default: 'standard' - choices: ['standard', 'hyperscale', 'bareMetal'] - configuration_id: - description: - - Only required for bare metal servers. - Specifies the identifier for the specific configuration type of bare metal server to deploy. - type: str - os_type: - description: - - Only required for bare metal servers. - Specifies the OS to provision with the bare metal server. - type: str - choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Provision a single Ubuntu Server - community.general.clc_server: - name: test - template: ubuntu-14-64 - count: 1 - group: Default Group - state: present - -- name: Ensure 'Default Group' has exactly 5 servers - community.general.clc_server: - name: test - template: ubuntu-14-64 - exact_count: 5 - count_group: Default Group - group: Default Group - -- name: Stop a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: stopped - -- name: Start a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: started - -- name: Delete a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: absent -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are created - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -partially_created_server_ids: - description: The list of server ids that are partially created - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -servers: - description: The list of server objects returned from CLC - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - "disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - "storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - 
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - "rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -''' - -__version__ = '${version}' - -import json -import os -import time -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - changed = False - new_server_ids = [] - server_dict_array = [] - - self._set_clc_credentials_from_env() - self.module.params = self._validate_module_params( - self.clc, - self.module) - p = self.module.params - state = p.get('state') - - # - # Handle each state - # - partial_servers_ids = [] - if state == 'absent': - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to delete: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._delete_servers(module=self.module, - clc=self.clc, - server_ids=server_ids) - - elif state in ('started', 'stopped'): - server_ids = p.get('server_ids') - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of servers to run: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._start_stop_servers(self.module, - self.clc, - server_ids) - - elif state == 'present': - # Changed is always set to true when provisioning new instances - if not p.get('template') and p.get('type') != 'bareMetal': - return self.module.fail_json( - msg='template parameter is required for new instance') - - if p.get('exact_count') is None: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._create_servers(self.module, - self.clc) - else: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._enforce_count(self.module, - self.clc) - - self.module.exit_json( - changed=changed, - server_ids=new_server_ids, - partially_created_server_ids=partial_servers_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec 
for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(), - template=dict(), - group=dict(default='Default Group'), - network_id=dict(), - location=dict(default=None), - cpu=dict(default=1, type='int'), - memory=dict(default=1, type='int'), - alias=dict(default=None), - password=dict(default=None, no_log=True), - ip_address=dict(default=None), - storage_type=dict( - default='standard', - choices=[ - 'standard', - 'hyperscale']), - type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), - primary_dns=dict(default=None), - secondary_dns=dict(default=None), - additional_disks=dict(type='list', default=[], elements='dict'), - custom_fields=dict(type='list', default=[], elements='dict'), - ttl=dict(default=None), - managed_os=dict(type='bool', default=False), - description=dict(default=None), - source_server_password=dict(default=None, no_log=True), - cpu_autoscale_policy_id=dict(default=None), - anti_affinity_policy_id=dict(default=None), - anti_affinity_policy_name=dict(default=None), - alert_policy_id=dict(default=None), - alert_policy_name=dict(default=None), - packages=dict(type='list', default=[], elements='dict'), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'started', - 'stopped']), - count=dict(type='int', default=1), - exact_count=dict(type='int', default=None), - count_group=dict(), - server_ids=dict(type='list', default=[], elements='str'), - add_public_ip=dict(type='bool', default=False), - public_ip_protocol=dict( - default='TCP', - choices=[ - 'TCP', - 'UDP', - 'ICMP']), - public_ip_ports=dict(type='list', default=[], elements='dict'), - configuration_id=dict(default=None), - os_type=dict(default=None, - choices=[ - 'redHat6_64Bit', - 'centOS6_64Bit', - 'windows2012R2Standard_64Bit', - 'ubuntu14_64Bit' - ]), - wait=dict(type='bool', default=True)) - - mutually_exclusive = [ - ['exact_count', 'count'], - ['exact_count', 'state'], - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'], - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _validate_module_params(clc, module): - """ - Validate the module params, and lookup default values. 
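The mutually_exclusive pairs declared in the argument spec above are enforced by AnsibleModule itself, so the module body never re-checks them by hand; a minimal sketch of the mechanism, reduced to one of the pairs:

```python
# AnsibleModule fails the task up front if both members of a
# mutually-exclusive pair are supplied in the same task.
from ansible.module_utils.basic import AnsibleModule

def build_module():
    return AnsibleModule(
        argument_spec=dict(
            anti_affinity_policy_id=dict(type='str'),
            anti_affinity_policy_name=dict(type='str'),
        ),
        mutually_exclusive=[
            ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
        ],
        supports_check_mode=True,
    )
```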
- :param clc: clc-sdk instance to use - :param module: module to validate - :return: dictionary of validated params - """ - params = module.params - datacenter = ClcServer._find_datacenter(clc, module) - - ClcServer._validate_types(module) - ClcServer._validate_name(module) - - params['alias'] = ClcServer._find_alias(clc, module) - params['cpu'] = ClcServer._find_cpu(clc, module) - params['memory'] = ClcServer._find_memory(clc, module) - params['description'] = ClcServer._find_description(module) - params['ttl'] = ClcServer._find_ttl(clc, module) - params['template'] = ClcServer._find_template_id(module, datacenter) - params['group'] = ClcServer._find_group(module, datacenter).id - params['network_id'] = ClcServer._find_network_id(module, datacenter) - params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id( - clc, - module) - params['alert_policy_id'] = ClcServer._find_alert_policy_id( - clc, - module) - - return params - - @staticmethod - def _find_datacenter(clc, module): - """ - Find the datacenter by calling the CLC API. - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Datacenter instance - """ - location = module.params.get('location') - try: - if not location: - account = clc.v2.Account() - location = account.data.get('primaryDataCenter') - data_center = clc.v2.Datacenter(location) - return data_center - except CLCException: - module.fail_json(msg="Unable to find location: {0}".format(location)) - - @staticmethod - def _find_alias(clc, module): - """ - Find or Validate the Account Alias by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Account instance - """ - alias = module.params.get('alias') - if not alias: - try: - alias = clc.v2.Account.GetAlias() - except CLCException as ex: - module.fail_json(msg='Unable to find account alias. {0}'.format( - ex.message - )) - return alias - - @staticmethod - def _find_cpu(clc, module): - """ - Find or validate the CPU value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for CPU - """ - cpu = module.params.get('cpu') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not cpu and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("cpu"): - cpu = group.Defaults("cpu") - else: - module.fail_json( - msg=str("Can\'t determine a default cpu value. Please provide a value for cpu.")) - return cpu - - @staticmethod - def _find_memory(clc, module): - """ - Find or validate the Memory value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for Memory - """ - memory = module.params.get('memory') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not memory and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("memory"): - memory = group.Defaults("memory") - else: - module.fail_json(msg=str( - "Can\'t determine a default memory value. 
Please provide a value for memory.")) - return memory - - @staticmethod - def _find_description(module): - """ - Set the description module param to name if description is blank - :param module: the module to validate - :return: string description - """ - description = module.params.get('description') - if not description: - description = module.params.get('name') - return description - - @staticmethod - def _validate_types(module): - """ - Validate that type and storage_type are set appropriately, and fail if not - :param module: the module to validate - :return: none - """ - state = module.params.get('state') - server_type = module.params.get( - 'type').lower() if module.params.get('type') else None - storage_type = module.params.get( - 'storage_type').lower() if module.params.get('storage_type') else None - - if state == "present": - if server_type == "standard" and storage_type not in ( - "standard", "premium"): - module.fail_json( - msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) - - if server_type == "hyperscale" and storage_type != "hyperscale": - module.fail_json( - msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) - - @staticmethod - def _validate_name(module): - """ - Validate that name is the correct length if provided, fail if it's not - :param module: the module to validate - :return: none - """ - server_name = module.params.get('name') - state = module.params.get('state') - - if state == 'present' and ( - len(server_name) < 1 or len(server_name) > 6): - module.fail_json(msg=str( - "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) - - @staticmethod - def _find_ttl(clc, module): - """ - Validate that TTL is > 3600 if set, and fail if not - :param clc: clc-sdk instance to use - :param module: module to validate - :return: validated ttl - """ - ttl = module.params.get('ttl') - - if ttl: - if ttl <= 3600: - return module.fail_json(msg=str("Ttl cannot be <= 3600")) - else: - ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) - return ttl - - @staticmethod - def _find_template_id(module, datacenter): - """ - Find the template id by calling the CLC API. - :param module: the module to validate - :param datacenter: the datacenter to search for the template - :return: a valid clc template id - """ - lookup_template = module.params.get('template') - state = module.params.get('state') - type = module.params.get('type') - result = None - - if state == 'present' and type != 'bareMetal': - try: - result = datacenter.Templates().Search(lookup_template)[0].id - except CLCException: - module.fail_json( - msg=str( - "Unable to find a template: " + - lookup_template + - " in location: " + - datacenter.id)) - return result - - @staticmethod - def _find_network_id(module, datacenter): - """ - Validate the provided network id or return a default. 
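The ttl handling in _find_ttl above converts a relative lifetime into an absolute expiry; a plain-Python sketch of what clc.v2.time_utils.SecondsToZuluTS is used for there, assuming the usual ISO 8601 Zulu output format:

```python
# Relative TTL (seconds) -> absolute UTC expiry timestamp, with the same
# > 3600s validation as _find_ttl above. The output format is an assumption.
import time
from datetime import datetime, timezone

def ttl_to_zulu(ttl_seconds):
    if ttl_seconds <= 3600:
        raise ValueError('ttl must be greater than 3600 seconds')
    expiry = int(time.time()) + ttl_seconds
    return datetime.fromtimestamp(expiry, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
```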
- :param module: the module to validate - :param datacenter: the datacenter to search for a network id - :return: a valid network id - """ - network_id = module.params.get('network_id') - - if not network_id: - try: - network_id = datacenter.Networks().networks[0].id - # -- added for clc-sdk 2.23 compatibility - # datacenter_networks = clc_sdk.v2.Networks( - # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks']) - # network_id = datacenter_networks.networks[0].id - # -- end - except CLCException: - module.fail_json( - msg=str( - "Unable to find a network in location: " + - datacenter.id)) - - return network_id - - @staticmethod - def _find_aa_policy_id(clc, module): - """ - Validate if the anti affinity policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: aa_policy_id: the anti affinity policy id of the given name. - """ - aa_policy_id = module.params.get('anti_affinity_policy_id') - aa_policy_name = module.params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - alias = module.params.get('alias') - aa_policy_id = ClcServer._get_anti_affinity_policy_id( - clc, - module, - alias, - aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _find_alert_policy_id(clc, module): - """ - Validate if the alert policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: alert_policy_id: the alert policy id of the given name. - """ - alert_policy_id = module.params.get('alert_policy_id') - alert_policy_name = module.params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alias = module.params.get('alias') - alert_policy_id = ClcServer._get_alert_policy_id_by_name( - clc=clc, - module=module, - alias=alias, - alert_policy_name=alert_policy_name - ) - if not alert_policy_id: - module.fail_json( - msg='No alert policy exist with name : %s' % alert_policy_name) - return alert_policy_id - - def _create_servers(self, module, clc, override_count=None): - """ - Create New Servers in CLC cloud - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created - """ - p = module.params - request_list = [] - servers = [] - server_dict_array = [] - created_server_ids = [] - partial_created_servers_ids = [] - - add_public_ip = p.get('add_public_ip') - public_ip_protocol = p.get('public_ip_protocol') - public_ip_ports = p.get('public_ip_ports') - - params = { - 'name': p.get('name'), - 'template': p.get('template'), - 'group_id': p.get('group'), - 'network_id': p.get('network_id'), - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'alias': p.get('alias'), - 'password': p.get('password'), - 'ip_address': p.get('ip_address'), - 'storage_type': p.get('storage_type'), - 'type': p.get('type'), - 'primary_dns': p.get('primary_dns'), - 'secondary_dns': p.get('secondary_dns'), - 'additional_disks': p.get('additional_disks'), - 'custom_fields': p.get('custom_fields'), - 'ttl': p.get('ttl'), - 'managed_os': p.get('managed_os'), - 'description': p.get('description'), - 'source_server_password': p.get('source_server_password'), - 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), - 'anti_affinity_policy_id': 
p.get('anti_affinity_policy_id'), - 'packages': p.get('packages'), - 'configuration_id': p.get('configuration_id'), - 'os_type': p.get('os_type') - } - - count = override_count if override_count else p.get('count') - - changed = False if count == 0 else True - - if not changed: - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - for i in range(0, count): - if not module.check_mode: - req = self._create_clc_server(clc=clc, - module=module, - server_params=params) - server = req.requests[0].Server() - request_list.append(req) - servers.append(server) - - self._wait_for_requests(module, request_list) - self._refresh_servers(module, servers) - - ip_failed_servers = self._add_public_ip_to_servers( - module=module, - should_add_public_ip=add_public_ip, - servers=servers, - public_ip_protocol=public_ip_protocol, - public_ip_ports=public_ip_ports) - ap_failed_servers = self._add_alert_policy_to_servers(clc=clc, - module=module, - servers=servers) - - for server in servers: - if server in ip_failed_servers or server in ap_failed_servers: - partial_created_servers_ids.append(server.id) - else: - # reload server details - server = clc.v2.Server(server.id) - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - - if add_public_ip and len(server.PublicIPs().public_ips) > 0: - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - created_server_ids.append(server.id) - server_dict_array.append(server.data) - - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - - def _enforce_count(self, module, clc): - """ - Enforce that there is the right number of servers in the provided group. - Starts or stops servers as necessary. - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created or deleted - """ - p = module.params - changed = False - count_group = p.get('count_group') - datacenter = ClcServer._find_datacenter(clc, module) - exact_count = p.get('exact_count') - server_dict_array = [] - partial_servers_ids = [] - changed_server_ids = [] - - # fail here if the exact count was specified without filtering - # on a group, as this may lead to a undesired removal of instances - if exact_count and count_group is None: - return module.fail_json( - msg="you must use the 'count_group' option with exact_count") - - servers, running_servers = ClcServer._find_running_servers_by_group( - module, datacenter, count_group) - - if len(running_servers) == exact_count: - changed = False - - elif len(running_servers) < exact_count: - to_create = exact_count - len(running_servers) - server_dict_array, changed_server_ids, partial_servers_ids, changed \ - = self._create_servers(module, clc, override_count=to_create) - - for server in server_dict_array: - running_servers.append(server) - - elif len(running_servers) > exact_count: - to_remove = len(running_servers) - exact_count - all_server_ids = sorted([x.id for x in running_servers]) - remove_ids = all_server_ids[0:to_remove] - - (changed, server_dict_array, changed_server_ids) \ - = ClcServer._delete_servers(module, clc, remove_ids) - - return server_dict_array, changed_server_ids, partial_servers_ids, changed - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning requests are completed. 
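The _enforce_count logic above reduces to simple arithmetic over the running-server list: create the shortfall, or delete the surplus starting from the lowest-sorted server ids. As a standalone sketch (function name illustrative):

```python
# exact_count reconciliation, exactly as _enforce_count above decides it:
# the equal case falls through to a zero-surplus, empty delete list.
def plan_exact_count(running_ids, exact_count):
    if len(running_ids) < exact_count:
        return {'create': exact_count - len(running_ids), 'delete': []}
    surplus = len(running_ids) - exact_count
    return {'create': 0, 'delete': sorted(running_ids)[:surplus]}
```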
- :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. {1}'.format( - server.id, ex.message - )) - - @staticmethod - def _add_public_ip_to_servers( - module, - should_add_public_ip, - servers, - public_ip_protocol, - public_ip_ports): - """ - Create a public IP for servers - :param module: the AnsibleModule object - :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False - :param servers: List of servers to add public ips to - :param public_ip_protocol: a protocol to allow for the public ips - :param public_ip_ports: list of ports to allow for the public ips - :return: none - """ - failed_servers = [] - if not should_add_public_ip: - return failed_servers - - ports_lst = [] - request_list = [] - server = None - - for port in public_ip_ports: - ports_lst.append( - {'protocol': public_ip_protocol, 'port': port}) - try: - if not module.check_mode: - for server in servers: - request = server.PublicIPs().Add(ports_lst) - request_list.append(request) - except APIFailedResponse: - failed_servers.append(server) - ClcServer._wait_for_requests(module, request_list) - return failed_servers - - @staticmethod - def _add_alert_policy_to_servers(clc, module, servers): - """ - Associate the alert policy to servers - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param servers: List of servers to add alert policy to - :return: failed_servers: the list of servers which failed while associating alert policy - """ - failed_servers = [] - p = module.params - alert_policy_id = p.get('alert_policy_id') - alias = p.get('alias') - - if alert_policy_id and not module.check_mode: - for server in servers: - try: - ClcServer._add_alert_policy_to_server( - clc=clc, - alias=alias, - server_id=server.id, - alert_policy_id=alert_policy_id) - except CLCException: - failed_servers.append(server) - return failed_servers - - @staticmethod - def _add_alert_policy_to_server( - clc, alias, server_id, alert_policy_id): - """ - Associate an alert policy to a clc server - :param clc: the clc-sdk instance to use - :param alias: the clc account alias - :param server_id: The clc server id - :param alert_policy_id: the alert policy id to be associated to the server - :return: none - """ - try: - clc.v2.API.Call( - method='POST', - url='servers/%s/%s/alertPolicies' % (alias, server_id), - payload=json.dumps( - { - 'id': alert_policy_id - })) - except APIFailedResponse as e: - raise CLCException( - 'Failed to associate alert policy to the server : {0} with Error {1}'.format( - server_id, str(e.response_text))) - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - Returns the alert policy id for the given alert policy name - :param clc: the clc-sdk instance to use - 
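The wait pattern these helpers share is worth isolating: WaitUntilComplete() returns the count of failed sub-requests, so summing over all queued requests detects any failure in one pass. A minimal form of _wait_for_requests above:

```python
# request_list holds clc-sdk Requests instances; WaitUntilComplete()
# blocks and returns the number of failed sub-requests.
def wait_for_all(module, request_list):
    if not module.params.get('wait'):
        return
    failed = sum(request.WaitUntilComplete() for request in request_list)
    if failed > 0:
        module.fail_json(msg='Unable to process server request')
```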
:param module: the AnsibleModule object - :param alias: the clc account alias - :param alert_policy_name: the name of the alert policy - :return: alert_policy_id: the alert policy id - """ - alert_policy_id = None - policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias) - if not policies: - return alert_policy_id - for policy in policies.get('items'): - if policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _delete_servers(module, clc, server_ids): - """ - Delete the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to delete - :return: a list of dictionaries with server information about the servers that were deleted - """ - terminated_server_ids = [] - server_dict_array = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if not module.check_mode: - request_list.append(server.Delete()) - ClcServer._wait_for_requests(module, request_list) - - for server in servers: - terminated_server_ids.append(server.id) - - return True, server_dict_array, terminated_server_ids - - @staticmethod - def _start_stop_servers(module, clc, server_ids): - """ - Start or Stop the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to start or stop - :return: a list of dictionaries with server information about the servers that were started or stopped - """ - p = module.params - state = p.get('state') - changed = False - changed_servers = [] - server_dict_array = [] - result_server_ids = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if server.powerState != state: - changed_servers.append(server) - if not module.check_mode: - request_list.append( - ClcServer._change_server_power_state( - module, - server, - state)) - changed = True - - ClcServer._wait_for_requests(module, request_list) - ClcServer._refresh_servers(module, changed_servers) - - for server in set(changed_servers + servers): - try: - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - except (KeyError, IndexError): - pass - - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - @staticmethod - def _change_server_power_state(module, server, state): - """ - Change the server powerState - :param module: the module to check for intended state - :param server: the server to start or stop - :param state: the intended powerState for the server - :return: the request object from clc-sdk call - """ - result = None - try: - if state == 'started': - result = server.PowerOn() - else: - # Try to shut down the server and fall back to power off when unable to shut down. 
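_start_stop_servers above stays idempotent because of a single filter: only servers whose current powerState differs from the requested state are queued for a change. In isolation:

```python
# The selection that makes start/stop idempotent (sketch; server objects
# carry a .powerState attribute in the clc-sdk).
def servers_needing_change(servers, desired_state):
    return [s for s in servers if s.powerState != desired_state]
```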
- result = server.ShutDown() - if result and hasattr(result, 'requests') and result.requests[0]: - return result - else: - result = server.PowerOff() - except CLCException: - module.fail_json( - msg='Unable to change power state for server {0}'.format( - server.id)) - return result - - @staticmethod - def _find_running_servers_by_group(module, datacenter, count_group): - """ - Find a list of running servers in the provided group - :param module: the AnsibleModule object - :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group - :param count_group: the group to count the servers - :return: list of servers, and list of running servers - """ - group = ClcServer._find_group( - module=module, - datacenter=datacenter, - lookup_group=count_group) - - servers = group.Servers().Servers() - running_servers = [] - - for server in servers: - if server.status == 'active' and server.powerState == 'started': - running_servers.append(server) - - return servers, running_servers - - @staticmethod - def _find_group(module, datacenter, lookup_group=None): - """ - Find a server group in a datacenter by calling the CLC API - :param module: the AnsibleModule instance - :param datacenter: clc-sdk.Datacenter instance to search for the group - :param lookup_group: string name of the group to search for - :return: clc-sdk.Group instance - """ - if not lookup_group: - lookup_group = module.params.get('group') - try: - return datacenter.Groups().Get(lookup_group) - except CLCException: - pass - - # The search above only acts on the main - result = ClcServer._find_group_recursive( - module, - datacenter.Groups(), - lookup_group) - - if result is None: - module.fail_json( - msg=str( - "Unable to find group: " + - lookup_group + - " in location: " + - datacenter.id)) - - return result - - @staticmethod - def _find_group_recursive(module, group_list, lookup_group): - """ - Find a server group by recursively walking the tree - :param module: the AnsibleModule instance to use - :param group_list: a list of groups to search - :param lookup_group: the group to look for - :return: list of groups - """ - result = None - for group in group_list.groups: - subgroups = group.Subgroups() - try: - return subgroups.Get(lookup_group) - except CLCException: - result = ClcServer._find_group_recursive( - module, - subgroups, - lookup_group) - - if result is not None: - break - - return result - - @staticmethod - def _create_clc_server( - clc, - module, - server_params): - """ - Call the CLC Rest API to Create a Server - :param clc: the clc-python-sdk instance to use - :param module: the AnsibleModule instance to use - :param server_params: a dictionary of params to use to create the servers - :return: clc-sdk.Request object linked to the queued server request - """ - - try: - res = clc.v2.API.Call( - method='POST', - url='servers/%s' % - (server_params.get('alias')), - payload=json.dumps( - { - 'name': server_params.get('name'), - 'description': server_params.get('description'), - 'groupId': server_params.get('group_id'), - 'sourceServerId': server_params.get('template'), - 'isManagedOS': server_params.get('managed_os'), - 'primaryDNS': server_params.get('primary_dns'), - 'secondaryDNS': server_params.get('secondary_dns'), - 'networkId': server_params.get('network_id'), - 'ipAddress': server_params.get('ip_address'), - 'password': server_params.get('password'), - 'sourceServerPassword': server_params.get('source_server_password'), - 'cpu': server_params.get('cpu'), - 'cpuAutoscalePolicyId': 
server_params.get('cpu_autoscale_policy_id'), - 'memoryGB': server_params.get('memory'), - 'type': server_params.get('type'), - 'storageType': server_params.get('storage_type'), - 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'), - 'customFields': server_params.get('custom_fields'), - 'additionalDisks': server_params.get('additional_disks'), - 'ttl': server_params.get('ttl'), - 'packages': server_params.get('packages'), - 'configurationId': server_params.get('configuration_id'), - 'osType': server_params.get('os_type')})) - - result = clc.v2.Requests(res) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to create the server: {0}. {1}'.format( - server_params.get('name'), - ex.response_text - )) - - # - # Patch the Request object so that it returns a valid server - - # Find the server's UUID from the API response - server_uuid = [obj['id'] - for obj in res['links'] if obj['rel'] == 'self'][0] - - # Change the request server method to a _find_server_by_uuid closure so - # that it will work - result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry( - clc, - module, - server_uuid, - server_params.get('alias')) - - return result - - @staticmethod - def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format( - alias, ex.response_text)) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - return aa_policy_id - - # - # This is the function that gets patched to the Request.server object using a lamda closure - # - - @staticmethod - def _find_server_by_uuid_w_retry( - clc, module, svr_uuid, alias=None, retries=5, back_out=2): - """ - Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param svr_uuid: UUID of the server - :param retries: the number of retry attempts to make prior to fail. 
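The retry discipline this helper implements (its body follows just below) is a capped retry with exponential backoff, retrying only while the API keeps answering 404 for a freshly provisioned server; a generic sketch:

```python
# Capped retry with exponential backoff, retrying only on "not found yet"
# (modelled here as LookupError; the module checks for HTTP 404 instead).
import time

def retry_until_found(fetch, retries=5, back_out=2):
    while True:
        retries -= 1
        try:
            return fetch()
        except LookupError:
            if retries == 0:
                raise
            time.sleep(back_out)
            back_out *= 2
```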
default is 5 - :param alias: the Account Alias to search - :return: a clc-sdk.Server instance - """ - if not alias: - alias = clc.v2.Account.GetAlias() - - # Wait and retry if the api returns a 404 - while True: - retries -= 1 - try: - server_obj = clc.v2.API.Call( - method='GET', url='servers/%s/%s?uuid=true' % - (alias, svr_uuid)) - server_id = server_obj['id'] - server = clc.v2.Server( - id=server_id, - alias=alias, - server_obj=server_obj) - return server - - except APIFailedResponse as e: - if e.response_status_code != 404: - return module.fail_json( - msg='A failure response was received from CLC API when ' - 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' % - (svr_uuid, e.response_status_code, e.message)) - if retries == 0: - return module.fail_json( - msg='Unable to reach the CLC API after 5 attempts') - time.sleep(back_out) - back_out *= 2 - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - argument_dict = ClcServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_server = ClcServer(module) - clc_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py deleted file mode 100644 index 4de4c993..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py +++ /dev/null @@ -1,411 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_server_snapshot -short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. -options: - server_ids: - description: - - The list of CLC server Ids. - type: list - required: True - elements: str - expiration_days: - description: - - The number of days to keep the server snapshot before it expires. - type: int - default: 7 - required: False - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - required: False - choices: ['present', 'absent', 'restore'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - default: 'True' - required: False - type: str -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. 
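The snapshot module introduced here keeps all three states idempotent with one rule: present only acts on servers with no snapshot, while absent and restore only act on servers that already have one. As a sketch of the partition its ensure_* methods apply:

```python
# Which servers each snapshot state touches (server objects expose
# GetSnapshots() in the clc-sdk; helper name is illustrative).
def servers_to_change(servers, state):
    if state == 'present':
        return [s for s in servers if len(s.GetSnapshots()) == 0]
    return [s for s in servers if len(s.GetSnapshots()) > 0]
```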
The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Create server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - expiration_days: 10 - wait: True - state: present - -- name: Restore server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: True - state: restore - -- name: Delete server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: True - state: absent -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcSnapshot: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - server_ids = p['server_ids'] - expiration_days = p['expiration_days'] - state = p['state'] - request_list = [] - changed = False - changed_servers = [] - - self._set_clc_credentials_from_env() - if state == 'present': - changed, request_list, changed_servers = self.ensure_server_snapshot_present( - server_ids=server_ids, - expiration_days=expiration_days) - elif state == 'absent': - changed, request_list, changed_servers = self.ensure_server_snapshot_absent( - server_ids=server_ids) - elif state == 'restore': - changed, request_list, changed_servers = self.ensure_server_snapshot_restore( - server_ids=server_ids) - - self._wait_for_requests_to_complete(request_list) - return self.module.exit_json( - changed=changed, - server_ids=changed_servers) - - def ensure_server_snapshot_present(self, server_ids, expiration_days): - """ - Ensures the given set of server_ids have the snapshots created - :param 
server_ids: The list of server_ids to create the snapshot - :param expiration_days: The number of days to keep the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) == 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._create_server_snapshot(server, expiration_days) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _create_server_snapshot(self, server, expiration_days): - """ - Create the snapshot for the CLC server - :param server: the CLC server object - :param expiration_days: The number of days to keep the snapshot - :return: the create request object from CLC API Call - """ - result = None - try: - result = server.CreateSnapshot( - delete_existing=True, - expiration_days=expiration_days) - except CLCException as ex: - self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_absent(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots removed - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._delete_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _delete_server_snapshot(self, server): - """ - Delete snapshot for the CLC server - :param server: the CLC server object - :return: the delete snapshot request object from CLC API - """ - result = None - try: - result = server.DeleteSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_restore(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots restored - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._restore_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _restore_server_snapshot(self, server): - """ - Restore snapshot for the CLC server - :param server: the CLC server object - :return: the restore snapshot request object from CLC API - """ - result = None - try: - result = server.RestoreSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process server snapshot request') - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - expiration_days=dict(default=7, type='int'), - wait=dict(default=True), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'restore']), - ) - return argument_spec - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: The error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - 
api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcSnapshot.define_argument_spec(), - supports_check_mode=True - ) - clc_snapshot = ClcSnapshot(module) - clc_snapshot.process_request() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py deleted file mode 100644 index 64cc8b11..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Dimension Data -# Authors: -# - Aimon Bustardo -# - Bert Diwa -# - Adam Friedman -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: dimensiondata_network -short_description: Create, update, and delete MCP 1.0 & 2.0 networks -extends_documentation_fragment: -- community.general.dimensiondata -- community.general.dimensiondata_wait - -description: - - Create, update, and delete MCP 1.0 & 2.0 networks -author: 'Aimon Bustardo (@aimonb)' -options: - name: - description: - - The name of the network domain to create. - required: true - type: str - description: - description: - - Additional description of the network domain. - required: false - type: str - service_plan: - description: - - The service plan, either "ESSENTIALS" or "ADVANCED". - - MCP 2.0 Only. - choices: [ESSENTIALS, ADVANCED] - default: ESSENTIALS - type: str - state: - description: - - Should the resource be present or absent. - choices: [present, absent] - default: present - type: str -''' - -EXAMPLES = ''' -- name: Create an MCP 1.0 network - community.general.dimensiondata_network: - region: na - location: NA5 - name: mynet - -- name: Create an MCP 2.0 network - community.general.dimensiondata_network: - region: na - mcp_user: my_user - mcp_password: my_password - location: NA9 - name: mynet - service_plan: ADVANCED - -- name: Delete a network - community.general.dimensiondata_network: - region: na - location: NA1 - name: mynet - state: absent -''' - -RETURN = ''' -network: - description: Dictionary describing the network. - returned: On success when I(state=present). - type: complex - contains: - id: - description: Network ID. - type: str - sample: "8c787000-a000-4050-a215-280893411a7d" - name: - description: Network name. - type: str - sample: "My network" - description: - description: Network description. - type: str - sample: "My network description" - location: - description: Datacenter location. - type: str - sample: NA3 - status: - description: Network status. (MCP 2.0 only) - type: str - sample: NORMAL - private_net: - description: Private network subnet. 
(MCP 1.0 only) - type: str - sample: "10.2.3.0" - multicast: - description: Multicast enabled? (MCP 1.0 only) - type: bool - sample: false -''' -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule -from ansible.module_utils.common.text.converters import to_native - -if HAS_LIBCLOUD: - from libcloud.compute.base import NodeLocation - from libcloud.common.dimensiondata import DimensionDataAPIException - - -class DimensionDataNetworkModule(DimensionDataModule): - """ - The dimensiondata_network module for Ansible. - """ - - def __init__(self): - """ - Create a new Dimension Data network module. - """ - - super(DimensionDataNetworkModule, self).__init__( - module=AnsibleModule( - argument_spec=DimensionDataModule.argument_spec_with_wait( - name=dict(type='str', required=True), - description=dict(type='str', required=False), - service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']), - state=dict(default='present', choices=['present', 'absent']) - ), - required_together=DimensionDataModule.required_together() - ) - ) - - self.name = self.module.params['name'] - self.description = self.module.params['description'] - self.service_plan = self.module.params['service_plan'] - self.state = self.module.params['state'] - - def state_present(self): - network = self._get_network() - - if network: - self.module.exit_json( - changed=False, - msg='Network already exists', - network=self._network_to_dict(network) - ) - - network = self._create_network() - - self.module.exit_json( - changed=True, - msg='Created network "%s" in datacenter "%s".' % (self.name, self.location), - network=self._network_to_dict(network) - ) - - def state_absent(self): - network = self._get_network() - - if not network: - self.module.exit_json( - changed=False, - msg='Network "%s" does not exist' % self.name, - network=self._network_to_dict(network) - ) - - self._delete_network(network) - - def _get_network(self): - if self.mcp_version == '1.0': - networks = self.driver.list_networks(location=self.location) - else: - networks = self.driver.ex_list_network_domains(location=self.location) - - matched_network = [network for network in networks if network.name == self.name] - if matched_network: - return matched_network[0] - - return None - - def _network_to_dict(self, network): - network_dict = dict( - id=network.id, - name=network.name, - description=network.description - ) - - if isinstance(network.location, NodeLocation): - network_dict['location'] = network.location.id - else: - network_dict['location'] = network.location - - if self.mcp_version == '1.0': - network_dict['private_net'] = network.private_net - network_dict['multicast'] = network.multicast - network_dict['status'] = None - else: - network_dict['private_net'] = None - network_dict['multicast'] = None - network_dict['status'] = network.status - - return network_dict - - def _create_network(self): - - # Make sure service_plan argument is defined - if self.mcp_version == '2.0' and 'service_plan' not in self.module.params: - self.module.fail_json( - msg='service_plan required when creating network and location is MCP 2.0' - ) - - # Create network - try: - if self.mcp_version == '1.0': - network = self.driver.ex_create_network( - self.location, - self.name, - description=self.description - ) - else: - network = self.driver.ex_create_network_domain( - self.location, - self.name, - self.module.params['service_plan'], - 
description=self.description
-                )
-        except DimensionDataAPIException as e:
-
-            self.module.fail_json(
-                msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
-            )
-
-        if self.module.params['wait'] is True:
-            network = self._wait_for_network_state(network.id, 'NORMAL')
-
-        return network
-
-    def _delete_network(self, network):
-        try:
-            if self.mcp_version == '1.0':
-                deleted = self.driver.ex_delete_network(network)
-            else:
-                deleted = self.driver.ex_delete_network_domain(network)
-
-            if deleted:
-                self.module.exit_json(
-                    changed=True,
-                    msg="Deleted network with id %s" % network.id
-                )
-
-            self.module.fail_json(
-                msg="Unexpected failure deleting network with id %s" % network.id
-            )
-
-        except DimensionDataAPIException as e:
-            self.module.fail_json(
-                msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
-            )
-
-    def _wait_for_network_state(self, net_id, state_to_wait_for):
-        try:
-            return self.driver.connection.wait_for_state(
-                state_to_wait_for,
-                self.driver.ex_get_network_domain,
-                self.module.params['wait_poll_interval'],
-                self.module.params['wait_time'],
-                net_id
-            )
-        except DimensionDataAPIException as e:
-            self.module.fail_json(
-                msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
-                exception=traceback.format_exc()
-            )
-
-
-def main():
-    module = DimensionDataNetworkModule()
-    if module.state == 'present':
-        module.state_present()
-    elif module.state == 'absent':
-        module.state_absent()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py b/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py
deleted file mode 100644
index 26c621f4..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py
+++ /dev/null
@@ -1,568 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2016 Dimension Data
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
-# Authors:
-# - Adam Friedman
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: dimensiondata_vlan
-short_description: Manage a VLAN in a Cloud Control network domain.
-extends_documentation_fragment:
-- community.general.dimensiondata
-- community.general.dimensiondata_wait
-
-description:
-  - Manage VLANs in Cloud Control network domains.
-author: 'Adam Friedman (@tintoy)'
-options:
-  name:
-    description:
-      - The name of the target VLAN.
-    type: str
-    required: true
-  description:
-    description:
-      - A description of the VLAN.
-    type: str
-  network_domain:
-    description:
-      - The Id or name of the target network domain.
-    required: true
-    type: str
-  private_ipv4_base_address:
-    description:
-      - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
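
An aside on the wait handling seen in the network module above: driver.connection.wait_for_state boils down to a bounded poll loop. A minimal standalone sketch of that pattern, assuming an illustrative get_resource(resource_id) callable returning an object with a .state attribute (neither is part of the module):

import time


def wait_for_state(state, get_resource, poll_interval, timeout, resource_id):
    # Poll until the resource reports the desired state or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        resource = get_resource(resource_id)
        if resource.state == state:
            return resource
        time.sleep(poll_interval)
    raise TimeoutError('%s did not reach %s state in time' % (resource_id, state))
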
- type: str - private_ipv4_prefix_size: - description: - - The size of the IPv4 address space, e.g 24. - - Required, if C(private_ipv4_base_address) is specified. - type: int - state: - description: - - The desired state for the target VLAN. - - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist). - choices: [present, absent, readonly] - default: present - type: str - allow_expand: - description: - - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses. - - If C(False), the module will fail under these conditions. - - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible). - type: bool - default: 'no' -''' - -EXAMPLES = ''' -- name: Add or update VLAN - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan1 - description: A test VLAN - private_ipv4_base_address: 192.168.23.0 - private_ipv4_prefix_size: 24 - state: present - wait: yes - -- name: Read / get VLAN details - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan1 - state: readonly - wait: yes - -- name: Delete a VLAN - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan_1 - state: absent - wait: yes -''' - -RETURN = ''' -vlan: - description: Dictionary describing the VLAN. - returned: On success when I(state) is 'present' - type: complex - contains: - id: - description: VLAN ID. - type: str - sample: "aaaaa000-a000-4050-a215-2808934ccccc" - name: - description: VLAN name. - type: str - sample: "My VLAN" - description: - description: VLAN description. - type: str - sample: "My VLAN description" - location: - description: Datacenter location. - type: str - sample: NA3 - private_ipv4_base_address: - description: The base address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.0 - private_ipv4_prefix_size: - description: The prefix size for the VLAN's private IPV4 network. - type: int - sample: 24 - private_ipv4_gateway_address: - description: The gateway address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.1 - private_ipv6_base_address: - description: The base address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:0 - private_ipv6_prefix_size: - description: The prefix size for the VLAN's IPV6 network. - type: int - sample: 64 - private_ipv6_gateway_address: - description: The gateway address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:1 - status: - description: VLAN status. - type: str - sample: NORMAL -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError - -try: - from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException - - HAS_LIBCLOUD = True - -except ImportError: - DimensionDataVlan = None - - HAS_LIBCLOUD = False - - -class DimensionDataVlanModule(DimensionDataModule): - """ - The dimensiondata_vlan module for Ansible. - """ - - def __init__(self): - """ - Create a new Dimension Data VLAN module. 
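
The HAS_LIBCLOUD flag above follows the standard Ansible pattern for optional third-party imports: catch the ImportError at load time and fail with a readable message at runtime. The same pattern in miniature, with somelib standing in for the real dependency:

import traceback

try:
    import somelib  # hypothetical optional dependency
    HAS_SOMELIB = True
    SOMELIB_IMPORT_ERROR = None
except ImportError:
    somelib = None
    HAS_SOMELIB = False
    SOMELIB_IMPORT_ERROR = traceback.format_exc()


def ensure_somelib(module):
    # Call early in the module so users get a clear failure message
    # instead of a bare ImportError traceback.
    if not HAS_SOMELIB:
        module.fail_json(msg='somelib is required for this module',
                         exception=SOMELIB_IMPORT_ERROR)
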
- """ - - super(DimensionDataVlanModule, self).__init__( - module=AnsibleModule( - argument_spec=DimensionDataModule.argument_spec_with_wait( - name=dict(required=True, type='str'), - description=dict(default='', type='str'), - network_domain=dict(required=True, type='str'), - private_ipv4_base_address=dict(default='', type='str'), - private_ipv4_prefix_size=dict(default=0, type='int'), - allow_expand=dict(required=False, default=False, type='bool'), - state=dict(default='present', choices=['present', 'absent', 'readonly']) - ), - required_together=DimensionDataModule.required_together() - ) - ) - - self.name = self.module.params['name'] - self.description = self.module.params['description'] - self.network_domain_selector = self.module.params['network_domain'] - self.private_ipv4_base_address = self.module.params['private_ipv4_base_address'] - self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size'] - self.state = self.module.params['state'] - self.allow_expand = self.module.params['allow_expand'] - - if self.wait and self.state != 'present': - self.module.fail_json( - msg='The wait parameter is only supported when state is "present".' - ) - - def state_present(self): - """ - Ensure that the target VLAN is present. - """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if not vlan: - if self.module.check_mode: - self.module.exit_json( - msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format( - self.name, self.network_domain_selector - ), - changed=True - ) - - vlan = self._create_vlan(network_domain) - self.module.exit_json( - msg='Created VLAN "{0}" in network domain "{1}".'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=True - ) - else: - diff = VlanDiff(vlan, self.module.params) - if not diff.has_changes(): - self.module.exit_json( - msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=False - ) - - return - - try: - diff.ensure_legal_change() - except InvalidVlanChangeError as invalid_vlan_change: - self.module.fail_json( - msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format( - self.name, self.network_domain_selector, invalid_vlan_change - ) - ) - - if diff.needs_expand() and not self.allow_expand: - self.module.fail_json( - msg='The configured private IPv4 network size ({0}-bit prefix) for '.format( - self.private_ipv4_prefix_size - ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format( - vlan.private_ipv4_range_size - ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.' - ) - - if self.module.check_mode: - self.module.exit_json( - msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=True - ) - - if diff.needs_edit(): - vlan.name = self.name - vlan.description = self.description - - self.driver.ex_update_vlan(vlan) - - if diff.needs_expand(): - vlan.private_ipv4_range_size = self.private_ipv4_prefix_size - self.driver.ex_expand_vlan(vlan) - - self.module.exit_json( - msg='Updated VLAN "{0}" in network domain "{1}".'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=True - ) - - def state_readonly(self): - """ - Read the target VLAN's state. 
- """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if vlan: - self.module.exit_json( - vlan=vlan_to_dict(vlan), - changed=False - ) - else: - self.module.fail_json( - msg='VLAN "{0}" does not exist in network domain "{1}".'.format( - self.name, self.network_domain_selector - ) - ) - - def state_absent(self): - """ - Ensure that the target VLAN is not present. - """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if not vlan: - self.module.exit_json( - msg='VLAN "{0}" is absent from network domain "{1}".'.format( - self.name, self.network_domain_selector - ), - changed=False - ) - - return - - if self.module.check_mode: - self.module.exit_json( - msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format( - self.name, self.network_domain_selector - ), - vlan=vlan_to_dict(vlan), - changed=True - ) - - self._delete_vlan(vlan) - - self.module.exit_json( - msg='Deleted VLAN "{0}" from network domain "{1}".'.format( - self.name, self.network_domain_selector - ), - changed=True - ) - - def _get_vlan(self, network_domain): - """ - Retrieve the target VLAN details from CloudControl. - - :param network_domain: The target network domain. - :return: The VLAN, or None if the target VLAN was not found. - :rtype: DimensionDataVlan - """ - - vlans = self.driver.ex_list_vlans( - location=self.location, - network_domain=network_domain - ) - matching_vlans = [vlan for vlan in vlans if vlan.name == self.name] - if matching_vlans: - return matching_vlans[0] - - return None - - def _create_vlan(self, network_domain): - vlan = self.driver.ex_create_vlan( - network_domain, - self.name, - self.private_ipv4_base_address, - self.description, - self.private_ipv4_prefix_size - ) - - if self.wait: - vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL') - - return vlan - - def _delete_vlan(self, vlan): - try: - self.driver.ex_delete_vlan(vlan) - - # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present"). - if self.wait: - self._wait_for_vlan_state(vlan, 'NOT_FOUND') - - except DimensionDataAPIException as api_exception: - self.module.fail_json( - msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format( - vlan.id, api_exception.msg - ) - ) - - def _wait_for_vlan_state(self, vlan, state_to_wait_for): - network_domain = self._get_network_domain() - - wait_poll_interval = self.module.params['wait_poll_interval'] - wait_time = self.module.params['wait_time'] - - # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try. - - try: - return self.driver.connection.wait_for_state( - state_to_wait_for, - self.driver.ex_get_vlan, - wait_poll_interval, - wait_time, - vlan - ) - - except DimensionDataAPIException as api_exception: - if api_exception.code != 'RESOURCE_NOT_FOUND': - raise - - return DimensionDataVlan( - id=vlan.id, - status='NOT_FOUND', - name='', - description='', - private_ipv4_range_address='', - private_ipv4_range_size=0, - ipv4_gateway='', - ipv6_range_address='', - ipv6_range_size=0, - ipv6_gateway='', - location=self.location, - network_domain=network_domain - ) - - def _get_network_domain(self): - """ - Retrieve the target network domain from the Cloud Control API. - - :return: The network domain. 
- """ - - try: - return self.get_network_domain( - self.network_domain_selector, self.location - ) - except UnknownNetworkError: - self.module.fail_json( - msg='Cannot find network domain "{0}" in datacenter "{1}".'.format( - self.network_domain_selector, self.location - ) - ) - - return None - - -class InvalidVlanChangeError(Exception): - """ - Error raised when an illegal change to VLAN state is attempted. - """ - - pass - - -class VlanDiff(object): - """ - Represents differences between VLAN information (from CloudControl) and module parameters. - """ - - def __init__(self, vlan, module_params): - """ - - :param vlan: The VLAN information from CloudControl. - :type vlan: DimensionDataVlan - :param module_params: The module parameters. - :type module_params: dict - """ - - self.vlan = vlan - self.module_params = module_params - - self.name_changed = module_params['name'] != vlan.name - self.description_changed = module_params['description'] != vlan.description - self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address - self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size - - # Is configured prefix size greater than or less than the actual prefix size? - private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size - self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0 - self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0 - - def has_changes(self): - """ - Does the VlanDiff represent any changes between the VLAN and module configuration? - - :return: True, if there are change changes; otherwise, False. - """ - - return self.needs_edit() or self.needs_expand() - - def ensure_legal_change(self): - """ - Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state. - - - private_ipv4_base_address cannot be changed - - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size - - :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state. - """ - - # Cannot change base address for private IPv4 network. - if self.private_ipv4_base_address_changed: - raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.') - - # Cannot shrink private IPv4 network (by increasing prefix size). - if self.private_ipv4_prefix_size_increased: - raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).') - - def needs_edit(self): - """ - Is an Edit operation required to resolve the differences between the VLAN information and the module parameters? - - :return: True, if an Edit operation is required; otherwise, False. - """ - - return self.name_changed or self.description_changed - - def needs_expand(self): - """ - Is an Expand operation required to resolve the differences between the VLAN information and the module parameters? - - The VLAN's network is expanded by reducing the size of its network prefix. - - :return: True, if an Expand operation is required; otherwise, False. 
- """ - - return self.private_ipv4_prefix_size_decreased - - -def vlan_to_dict(vlan): - return { - 'id': vlan.id, - 'name': vlan.name, - 'description': vlan.description, - 'location': vlan.location.id, - 'private_ipv4_base_address': vlan.private_ipv4_range_address, - 'private_ipv4_prefix_size': vlan.private_ipv4_range_size, - 'private_ipv4_gateway_address': vlan.ipv4_gateway, - 'ipv6_base_address': vlan.ipv6_range_address, - 'ipv6_prefix_size': vlan.ipv6_range_size, - 'ipv6_gateway_address': vlan.ipv6_gateway, - 'status': vlan.status - } - - -def main(): - module = DimensionDataVlanModule() - - if module.state == 'present': - module.state_present() - elif module.state == 'readonly': - module.state_readonly() - elif module.state == 'absent': - module.state_absent() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py b/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py deleted file mode 100644 index bbc34fdb..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: heroku_collaborator -short_description: "Add or delete app collaborators on Heroku" -description: - - Manages collaborators for Heroku apps. - - If set to C(present) and heroku user is already collaborator, then do nothing. - - If set to C(present) and heroku user is not collaborator, then add user to app. - - If set to C(absent) and heroku user is collaborator, then delete user from app. -author: - - Marcel Arns (@marns93) -requirements: - - heroku3 -options: - api_key: - type: str - description: - - Heroku API key - apps: - type: list - elements: str - description: - - List of Heroku App names - required: true - suppress_invitation: - description: - - Suppress email invitation when creating collaborator - type: bool - default: "no" - user: - type: str - description: - - User ID or e-mail - required: true - state: - type: str - description: - - Create or remove the heroku collaborator - choices: ["present", "absent"] - default: "present" -notes: - - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) env variable can be used instead setting C(api_key). - - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"]. 
-''' - -EXAMPLES = ''' -- name: Create a heroku collaborator - community.general.heroku_collaborator: - api_key: YOUR_API_KEY - user: max.mustermann@example.com - apps: heroku-example-app - state: present - -- name: An example of using the module in loop - community.general.heroku_collaborator: - api_key: YOUR_API_KEY - user: '{{ item.user }}' - apps: '{{ item.apps | default(apps) }}' - suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}' - state: '{{ item.state | default("present") }}' - with_items: - - { user: 'a.b@example.com' } - - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false } - - { user: 'x.y@example.com', apps: ["heroku-example-app"] } -''' - -RETURN = ''' # ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper - - -def add_or_delete_heroku_collaborator(module, client): - user = module.params['user'] - state = module.params['state'] - affected_apps = [] - result_state = False - - for app in module.params['apps']: - if app not in client.apps(): - module.fail_json(msg='App {0} does not exist'.format(app)) - - heroku_app = client.apps()[app] - - heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()] - - if state == 'absent' and user in heroku_collaborator_list: - if not module.check_mode: - heroku_app.remove_collaborator(user) - affected_apps += [app] - result_state = True - elif state == 'present' and user not in heroku_collaborator_list: - if not module.check_mode: - heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation']) - affected_apps += [app] - result_state = True - - return result_state, affected_apps - - -def main(): - argument_spec = HerokuHelper.heroku_argument_spec() - argument_spec.update( - user=dict(required=True, type='str'), - apps=dict(required=True, type='list', elements='str'), - suppress_invitation=dict(default=False, type='bool'), - state=dict(default='present', type='str', choices=['present', 'absent']), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = HerokuHelper(module).get_heroku_client() - - has_changed, msg = add_or_delete_heroku_collaborator(module, client) - module.exit_json(changed=has_changed, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py deleted file mode 100644 index 3d4ba84b..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py +++ /dev/null @@ -1,2135 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_ecs_instance -description: - - instance management. -short_description: Creates a resource of Ecs/Instance in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. 
(@huaweicloud)
-requirements:
-    - keystoneauth1 >= 3.6.0
-options:
-    state:
-        description:
-            - Whether the given object should exist in Huawei Cloud.
-        type: str
-        choices: ['present', 'absent']
-        default: 'present'
-    timeouts:
-        description:
-            - The timeouts for each operation.
-        type: dict
-        suboptions:
-            create:
-                description:
-                    - The timeout for the create operation.
-                type: str
-                default: '30m'
-            update:
-                description:
-                    - The timeout for the update operation.
-                type: str
-                default: '30m'
-            delete:
-                description:
-                    - The timeout for the delete operation.
-                type: str
-                default: '30m'
-    availability_zone:
-        description:
-            - Specifies the name of the AZ where the ECS is located.
-        type: str
-        required: true
-    flavor_name:
-        description:
-            - Specifies the name of the system flavor.
-        type: str
-        required: true
-    image_id:
-        description:
-            - Specifies the ID of the system image.
-        type: str
-        required: true
-    name:
-        description:
-            - Specifies the ECS name. The value consists of 1 to 64
-              characters, including letters, digits, underscores C(_), hyphens
-              (-), and periods (.).
-        type: str
-        required: true
-    nics:
-        description:
-            - Specifies the NIC information of the ECS. The
-              network of the NIC must belong to the VPC specified by vpc_id. A
-              maximum of 12 NICs can be attached to an ECS.
-        type: list
-        elements: dict
-        required: true
-        suboptions:
-            ip_address:
-                description:
-                    - Specifies the IP address of the NIC. The value is an IPv4
-                      address. Its value must be an unused IP
-                      address in the network segment of the subnet.
-                type: str
-                required: true
-            subnet_id:
-                description:
-                    - Specifies the ID of subnet.
-                type: str
-                required: true
-    root_volume:
-        description:
-            - Specifies the configuration of the ECS's system disks.
-        type: dict
-        required: true
-        suboptions:
-            volume_type:
-                description:
-                    - Specifies the ECS system disk type.
-                    - SATA is common I/O disk type.
-                    - SAS is high I/O disk type.
-                    - SSD is ultra-high I/O disk type.
-                    - co-p1 is high I/O (performance-optimized I) disk type.
-                    - uh-l1 is ultra-high I/O (latency-optimized) disk type.
-                    - Note that for HANA, HL1, and HL2 ECSs, you should use co-p1 and uh-l1
-                      disks. For other ECSs, do not use co-p1 or uh-l1 disks.
-                type: str
-                required: true
-            size:
-                description:
-                    - Specifies the system disk size, in GB. The value range is
-                      1 to 1024. The system disk size must be
-                      greater than or equal to the minimum system disk size
-                      supported by the image (min_disk attribute of the image).
-                      If this parameter is not specified or is set to 0, the
-                      default system disk size is the minimum value of the
-                      system disk in the image (min_disk attribute of the
-                      image).
-                type: int
-                required: false
-            snapshot_id:
-                description:
-                    - Specifies the snapshot ID or ID of the original data disk
-                      contained in the full-ECS image.
-                type: str
-                required: false
-    vpc_id:
-        description:
-            - Specifies the ID of the VPC to which the ECS belongs.
-        type: str
-        required: true
-    admin_pass:
-        description:
-            - Specifies the initial login password of the administrator account
-              for logging in to an ECS using password authentication. The Linux
-              administrator is root, and the Windows administrator is
-              Administrator. The password must consist of 8 to
-              26 characters and contain at least three of the
-              following character types 'uppercase letters, lowercase letters,
-              digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password
-              cannot contain the username or the username in reverse.
The - Windows ECS password cannot contain the username, the username in - reverse, or more than two consecutive characters in the username. - type: str - required: false - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - elements: dict - required: false - suboptions: - volume_id: - description: - - Specifies the disk ID. - type: str - required: true - device: - description: - - Specifies the disk device name. - type: str - required: false - description: - description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. - type: str - required: false - eip_id: - description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be - assigned. - type: str - required: false - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. - type: bool - required: false - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - required: false - security_groups: - description: - - Specifies the security groups of the ECS. If this - parameter is left blank, the default security group is bound to - the ECS by default. - type: list - elements: str - required: false - server_metadata: - description: - - Specifies the metadata of ECS to be created. - type: dict - required: false - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - required: false - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. - type: str - required: false - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. - The content to be injected must be encoded with - base64. The maximum size of the content to be injected (before - encoding) is 32 KB. For Linux ECSs, this parameter does not take - effect when adminPass is used. 
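
Since user_data (described above) must arrive base64-encoded and under 32 KB before encoding, callers typically encode and size-check it up front. A small sketch of that preparation step (encode_user_data is an illustrative helper, not part of the module):

import base64


def encode_user_data(text):
    # user_data must be base64-encoded; the pre-encoding limit is 32 KB.
    raw = text.encode('utf-8')
    if len(raw) > 32 * 1024:
        raise ValueError('user_data exceeds 32 KB before encoding')
    return base64.b64encode(raw).decode('ascii')


assert encode_user_data('#!/bin/bash\necho hello\n').startswith('IyEvYmlu')
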
- type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create an ecs instance -- name: Create a vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create a subnet - hwc_vpc_subnet: - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: true - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - register: subnet -- name: Create a eip - hwc_vpc_eip: - dedicated_bandwidth: - charge_mode: "traffic" - name: "ansible_test_dedicated_bandwidth" - size: 1 - type: "5_bgp" - register: eip -- name: Create a disk - hwc_evs_disk: - availability_zone: "cn-north-1a" - name: "ansible_evs_disk_test" - volume_type: "SATA" - size: 10 - register: disk -- name: Create an instance - community.general.hwc_ecs_instance: - data_volumes: - - volume_id: "{{ disk.id }}" - enable_auto_recovery: false - eip_id: "{{ eip.id }}" - name: "ansible_ecs_instance_test" - availability_zone: "cn-north-1a" - nics: - - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.33" - - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.34" - server_tags: - my_server: "my_server" - image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892" - flavor_name: "s3.small.1" - vpc_id: "{{ vpc.id }}" - root_volume: - volume_type: "SAS" -''' - -RETURN = ''' - availability_zone: - description: - - Specifies the name of the AZ where the ECS is located. - type: str - returned: success - flavor_name: - description: - - Specifies the name of the system flavor. - type: str - returned: success - image_id: - description: - - Specifies the ID of the system image. - type: str - returned: success - name: - description: - - Specifies the ECS name. Value requirements "Consists of 1 to 64 - characters, including letters, digits, underscores C(_), hyphens - (-), periods (.)". - type: str - returned: success - nics: - description: - - Specifies the NIC information of the ECS. The - network of the NIC must belong to the VPC specified by vpc_id. A - maximum of 12 NICs can be attached to an ECS. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address of the NIC. The value is an IPv4 - address. Its value must be an unused IP - address in the network segment of the subnet. - type: str - returned: success - subnet_id: - description: - - Specifies the ID of subnet. - type: str - returned: success - port_id: - description: - - Specifies the port ID corresponding to the IP address. - type: str - returned: success - root_volume: - description: - - Specifies the configuration of the ECS's system disks. - type: dict - returned: success - contains: - volume_type: - description: - - Specifies the ECS system disk type. - - SATA is common I/O disk type. - - SAS is high I/O disk type. - - SSD is ultra-high I/O disk type. - - co-p1 is high I/O (performance-optimized I) disk type. - - uh-l1 is ultra-high I/O (latency-optimized) disk type. - - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 - disks. For other ECSs, do not use co-p1 or uh-l1 disks. - type: str - returned: success - size: - description: - - Specifies the system disk size, in GB. The value range is - 1 to 1024. The system disk size must be - greater than or equal to the minimum system disk size - supported by the image (min_disk attribute of the image). 
- If this parameter is not specified or is set to 0, the - default system disk size is the minimum value of the - system disk in the image (min_disk attribute of the - image). - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID or ID of the original data disk - contained in the full-ECS image. - type: str - returned: success - device: - description: - - Specifies the disk device name. - type: str - returned: success - volume_id: - description: - - Specifies the disk ID. - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the ECS belongs. - type: str - returned: success - admin_pass: - description: - - Specifies the initial login password of the administrator account - for logging in to an ECS using password authentication. The Linux - administrator is root, and the Windows administrator is - Administrator. Password complexity requirements consists of 8 to - 26 characters. The password must contain at least three of the - following character types "uppercase letters, lowercase letters, - digits, and special characters (!@$%^-_=+[{}]:,./?)". The password - cannot contain the username or the username in reverse. The - Windows ECS password cannot contain the username, the username in - reverse, or more than two consecutive characters in the username. - type: str - returned: success - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - returned: success - contains: - volume_id: - description: - - Specifies the disk ID. - type: str - returned: success - device: - description: - - Specifies the disk device name. - type: str - returned: success - description: - description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. - type: str - returned: success - eip_id: - description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be assigned. - type: str - returned: success - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. - type: bool - returned: success - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - returned: success - security_groups: - description: - - Specifies the security groups of the ECS. If this parameter is left - blank, the default security group is bound to the ECS by default. - type: list - returned: success - server_metadata: - description: - - Specifies the metadata of ECS to be created. - type: dict - returned: success - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - returned: success - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. - type: str - returned: success - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. - The content to be injected must be encoded with base64. The maximum - size of the content to be injected (before encoding) is 32 KB. For - Linux ECSs, this parameter does not take effect when adminPass is - used. - type: str - returned: success - config_drive: - description: - - Specifies the configuration driver. 
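
The timeouts documented earlier ('30m' style strings) are consumed later in the module as whole minutes, via 60 * int(value.rstrip('m')). The same conversion with a stricter format guard (parse_minutes is an illustrative name):

def parse_minutes(value):
    # The module computes 60 * int(value.rstrip('m')); this adds a format check.
    if not value.endswith('m'):
        raise ValueError('expected a duration like "30m", got %r' % (value,))
    return 60 * int(value[:-1])


assert parse_minutes('30m') == 1800
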
- type: str - returned: success - created: - description: - - Specifies the time when an ECS was created. - type: str - returned: success - disk_config_type: - description: - - Specifies the disk configuration type. MANUAL is The image - space is not expanded. AUTO is the image space of the system disk - will be expanded to be as same as the flavor. - type: str - returned: success - host_name: - description: - - Specifies the host name of the ECS. - type: str - returned: success - image_name: - description: - - Specifies the image name of the ECS. - type: str - returned: success - power_state: - description: - - Specifies the power status of the ECS. - type: int - returned: success - server_alias: - description: - - Specifies the ECS alias. - type: str - returned: success - status: - description: - - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, - REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR, - and DELETED. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='30m', type='str'), - update=dict(default='30m', type='str'), - delete=dict(default='30m', type='str'), - ), default=dict()), - availability_zone=dict(type='str', required=True), - flavor_name=dict(type='str', required=True), - image_id=dict(type='str', required=True), - name=dict(type='str', required=True), - nics=dict( - type='list', required=True, elements='dict', - options=dict( - ip_address=dict(type='str', required=True), - subnet_id=dict(type='str', required=True) - ), - ), - root_volume=dict(type='dict', required=True, options=dict( - volume_type=dict(type='str', required=True), - size=dict(type='int'), - snapshot_id=dict(type='str') - )), - vpc_id=dict(type='str', required=True), - admin_pass=dict(type='str', no_log=True), - data_volumes=dict(type='list', elements='dict', options=dict( - volume_id=dict(type='str', required=True), - device=dict(type='str') - )), - description=dict(type='str'), - eip_id=dict(type='str'), - enable_auto_recovery=dict(type='bool'), - enterprise_project_id=dict(type='str'), - security_groups=dict(type='list', elements='str'), - server_metadata=dict(type='dict'), - server_tags=dict(type='dict'), - ssh_key_name=dict(type='str'), - user_data=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "ecs") - - try: - _init(config) - is_exist = module.params['id'] - - result = None - changed = False - if module.params['state'] == 'present': - if not is_exist: - if not module.check_mode: - create(config) - changed = True - - inputv = user_input_parameters(module) - resp, array_index = read_resource(config) - result = build_state(inputv, resp, array_index) - set_readonly_options(inputv, result) - if are_different_dicts(inputv, result): - if not module.check_mode: - update(config, inputv, result) - - inputv = user_input_parameters(module) - resp, array_index = read_resource(config) - result = build_state(inputv, resp, array_index) - set_readonly_options(inputv, result) - if are_different_dicts(inputv, result): - raise Exception("Update resource failed, " - "some 
attributes are not updated") - - changed = True - - result['id'] = module.params.get('id') - else: - result = dict() - if is_exist: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def _init(config): - module = config.module - if module.params['id']: - return - - v = search_resource(config) - n = len(v) - if n > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) - for i in v - ])) - - if n == 1: - module.params['id'] = navigate_value(v[0], ["id"]) - - -def user_input_parameters(module): - return { - "admin_pass": module.params.get("admin_pass"), - "availability_zone": module.params.get("availability_zone"), - "data_volumes": module.params.get("data_volumes"), - "description": module.params.get("description"), - "eip_id": module.params.get("eip_id"), - "enable_auto_recovery": module.params.get("enable_auto_recovery"), - "enterprise_project_id": module.params.get("enterprise_project_id"), - "flavor_name": module.params.get("flavor_name"), - "image_id": module.params.get("image_id"), - "name": module.params.get("name"), - "nics": module.params.get("nics"), - "root_volume": module.params.get("root_volume"), - "security_groups": module.params.get("security_groups"), - "server_metadata": module.params.get("server_metadata"), - "server_tags": module.params.get("server_tags"), - "ssh_key_name": module.params.get("ssh_key_name"), - "user_data": module.params.get("user_data"), - "vpc_id": module.params.get("vpc_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "ecs", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - opts["ansible_module"] = module - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait(config, r, client, timeout) - - sub_job_identity = { - "job_type": "createSingleServer", - } - for item in navigate_value(obj, ["entities", "sub_jobs"]): - for k, v in sub_job_identity.items(): - if item[k] != v: - break - else: - obj = item - break - else: - raise Exception("Can't find the sub job") - module.params['id'] = navigate_value(obj, ["entities", "server_id"]) - - -def update(config, expect_state, current_state): - module = config.module - expect_state["current_state"] = current_state - current_state["current_state"] = current_state - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) - client = config.client(get_region(module), "ecs", "project") - - params = build_delete_nics_parameters(expect_state) - params1 = build_delete_nics_parameters(current_state) - if params and are_different_dicts(params, params1): - r = send_delete_nics_request(module, params, client) - async_wait(config, r, client, timeout) - - params = build_set_auto_recovery_parameters(expect_state) - params1 = build_set_auto_recovery_parameters(current_state) - if params and are_different_dicts(params, params1): - send_set_auto_recovery_request(module, params, client) - - params = build_attach_nics_parameters(expect_state) - params1 = build_attach_nics_parameters(current_state) - if params and are_different_dicts(params, params1): - r = send_attach_nics_request(module, params, client) - async_wait(config, r, client, timeout) - - multi_invoke_delete_volume(config, expect_state, client, timeout) - - multi_invoke_attach_data_disk(config, 
expect_state, client, timeout) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "ecs", "project") - timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) - - opts = user_input_parameters(module) - opts["ansible_module"] = module - - params = build_delete_parameters(opts) - if params: - r = send_delete_request(module, params, client) - async_wait(config, r, client, timeout) - - -def read_resource(config): - module = config.module - client = config.client(get_region(module), "ecs", "project") - - res = {} - - r = send_read_request(module, client) - preprocess_read_response(r) - res["read"] = fill_read_resp_body(r) - - r = send_read_auto_recovery_request(module, client) - res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r) - - return res, None - - -def preprocess_read_response(resp): - v = resp.get("os-extended-volumes:volumes_attached") - if v and isinstance(v, list): - for i in range(len(v)): - if v[i].get("bootIndex") == "0": - root_volume = v[i] - - if (i + 1) != len(v): - v[i] = v[-1] - - v.pop() - - resp["root_volume"] = root_volume - break - - v = resp.get("addresses") - if v: - rv = {} - eips = [] - for val in v.values(): - for item in val: - if item["OS-EXT-IPS:type"] == "floating": - eips.append(item) - else: - rv[item["OS-EXT-IPS:port_id"]] = item - - for item in eips: - k = item["OS-EXT-IPS:port_id"] - if k in rv: - rv[k]["eip_address"] = item.get("addr", "") - else: - rv[k] = item - item["eip_address"] = item.get("addr", "") - item["addr"] = "" - - resp["address"] = rv.values() - - -def build_state(opts, response, array_index): - states = flatten_options(response, array_index) - set_unreadable_options(opts, states) - adjust_options(opts, states) - return states - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["enterprise_project_id"]) - if v or v in [False, 0]: - query_params.append( - "enterprise_project_id=" + (str(v) if v else str(v).lower())) - - v = navigate_value(opts, ["name"]) - if v or v in [False, 0]: - query_params.append( - "name=" + (str(v) if v else str(v).lower())) - - query_link = "?limit=10&offset={offset}" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "ecs", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "cloudservers/detail" + query_link - - result = [] - p = {'offset': 1} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - adjust_list_resp(identity_obj, item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['offset'] += 1 - - return result - - -def build_delete_nics_parameters(opts): - params = dict() - - v = expand_delete_nics_nics(opts, None) - if not is_empty_value(v): - params["nics"] = v - - return params - - -def expand_delete_nics_nics(d, array_index): - cv = d["current_state"].get("nics") - if not cv: - return None - - val = cv - - ev = d.get("nics") - if ev: - m = [item.get("ip_address") for item in ev] - val = [item for item in cv if item.get("ip_address") not in m] - - r = [] - for item in val: - transformed = dict() - - v = item.get("port_id") - if not is_empty_value(v): - transformed["id"] = v - - if transformed: - r.append(transformed) - 
- return r - - -def send_delete_nics_request(module, params, client): - url = build_path(module, "cloudservers/{id}/nics/delete") - - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(delete_nics), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_set_auto_recovery_parameters(opts): - params = dict() - - v = expand_set_auto_recovery_support_auto_recovery(opts, None) - if v is not None: - params["support_auto_recovery"] = v - - return params - - -def expand_set_auto_recovery_support_auto_recovery(d, array_index): - v = navigate_value(d, ["enable_auto_recovery"], None) - return None if v is None else str(v).lower() - - -def send_set_auto_recovery_request(module, params, client): - url = build_path(module, "cloudservers/{id}/autorecovery") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(set_auto_recovery), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["admin_pass"], None) - if not is_empty_value(v): - params["adminPass"] = v - - v = navigate_value(opts, ["availability_zone"], None) - if not is_empty_value(v): - params["availability_zone"] = v - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = expand_create_extendparam(opts, None) - if not is_empty_value(v): - params["extendparam"] = v - - v = navigate_value(opts, ["flavor_name"], None) - if not is_empty_value(v): - params["flavorRef"] = v - - v = navigate_value(opts, ["image_id"], None) - if not is_empty_value(v): - params["imageRef"] = v - - v = navigate_value(opts, ["ssh_key_name"], None) - if not is_empty_value(v): - params["key_name"] = v - - v = navigate_value(opts, ["server_metadata"], None) - if not is_empty_value(v): - params["metadata"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = expand_create_nics(opts, None) - if not is_empty_value(v): - params["nics"] = v - - v = expand_create_publicip(opts, None) - if not is_empty_value(v): - params["publicip"] = v - - v = expand_create_root_volume(opts, None) - if not is_empty_value(v): - params["root_volume"] = v - - v = expand_create_security_groups(opts, None) - if not is_empty_value(v): - params["security_groups"] = v - - v = expand_create_server_tags(opts, None) - if not is_empty_value(v): - params["server_tags"] = v - - v = navigate_value(opts, ["user_data"], None) - if not is_empty_value(v): - params["user_data"] = v - - v = navigate_value(opts, ["vpc_id"], None) - if not is_empty_value(v): - params["vpcid"] = v - - if not params: - return params - - params = {"server": params} - - return params - - -def expand_create_extendparam(d, array_index): - r = dict() - - r["chargingMode"] = 0 - - v = navigate_value(d, ["enterprise_project_id"], array_index) - if not is_empty_value(v): - r["enterprise_project_id"] = v - - v = navigate_value(d, ["enable_auto_recovery"], array_index) - if not is_empty_value(v): - r["support_auto_recovery"] = v - - return r - - -def expand_create_nics(d, array_index): - new_ai = dict() - if array_index: - new_ai.update(array_index) - - req = [] - - v = navigate_value( - d, ["nics"], new_ai) - - if not v: - return req - n = len(v) - for i in range(n): - new_ai["nics"] = i - transformed = dict() - - v = navigate_value(d, ["nics", "ip_address"], new_ai) 
- if not is_empty_value(v): - transformed["ip_address"] = v - - v = navigate_value(d, ["nics", "subnet_id"], new_ai) - if not is_empty_value(v): - transformed["subnet_id"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_create_publicip(d, array_index): - r = dict() - - v = navigate_value(d, ["eip_id"], array_index) - if not is_empty_value(v): - r["id"] = v - - return r - - -def expand_create_root_volume(d, array_index): - r = dict() - - v = expand_create_root_volume_extendparam(d, array_index) - if not is_empty_value(v): - r["extendparam"] = v - - v = navigate_value(d, ["root_volume", "size"], array_index) - if not is_empty_value(v): - r["size"] = v - - v = navigate_value(d, ["root_volume", "volume_type"], array_index) - if not is_empty_value(v): - r["volumetype"] = v - - return r - - -def expand_create_root_volume_extendparam(d, array_index): - r = dict() - - v = navigate_value(d, ["root_volume", "snapshot_id"], array_index) - if not is_empty_value(v): - r["snapshotId"] = v - - return r - - -def expand_create_security_groups(d, array_index): - v = d.get("security_groups") - if not v: - return None - - return [{"id": i} for i in v] - - -def expand_create_server_tags(d, array_index): - v = d.get("server_tags") - if not v: - return None - - return [{"key": k, "value": v1} for k, v1 in v.items()] - - -def send_create_request(module, params, client): - url = "cloudservers" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_attach_nics_parameters(opts): - params = dict() - - v = expand_attach_nics_nics(opts, None) - if not is_empty_value(v): - params["nics"] = v - - return params - - -def expand_attach_nics_nics(d, array_index): - ev = d.get("nics") - if not ev: - return None - - val = ev - - cv = d["current_state"].get("nics") - if cv: - m = [item.get("ip_address") for item in cv] - val = [item for item in ev if item.get("ip_address") not in m] - - r = [] - for item in val: - transformed = dict() - - v = item.get("ip_address") - if not is_empty_value(v): - transformed["ip_address"] = v - - v = item.get("subnet_id") - if not is_empty_value(v): - transformed["subnet_id"] = v - - if transformed: - r.append(transformed) - - return r - - -def send_attach_nics_request(module, params, client): - url = build_path(module, "cloudservers/{id}/nics") - - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(attach_nics), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_volume_request(module, params, client, info): - path_parameters = { - "volume_id": ["volume_id"], - } - data = dict((key, navigate_value(info, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(delete_volume), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_attach_data_disk_parameters(opts, array_index): - params = dict() - - v = expand_attach_data_disk_volume_attachment(opts, array_index) - if not is_empty_value(v): - params["volumeAttachment"] = v - - return params - - -def expand_attach_data_disk_volume_attachment(d, array_index): - r = dict() - - v = navigate_value(d, ["data_volumes", 
"device"], array_index) - if not is_empty_value(v): - r["device"] = v - - v = navigate_value(d, ["data_volumes", "volume_id"], array_index) - if not is_empty_value(v): - r["volumeId"] = v - - return r - - -def send_attach_data_disk_request(module, params, client): - url = build_path(module, "cloudservers/{id}/attachvolume") - - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(attach_data_disk), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_delete_parameters(opts): - params = dict() - - params["delete_publicip"] = False - - params["delete_volume"] = False - - v = expand_delete_servers(opts, None) - if not is_empty_value(v): - params["servers"] = v - - return params - - -def expand_delete_servers(d, array_index): - new_ai = dict() - if array_index: - new_ai.update(array_index) - - req = [] - - n = 1 - for i in range(n): - transformed = dict() - - v = expand_delete_servers_id(d, new_ai) - if not is_empty_value(v): - transformed["id"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_delete_servers_id(d, array_index): - return d["ansible_module"].params.get("id") - - -def send_delete_request(module, params, client): - url = "cloudservers/delete" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait(config, result, client, timeout): - module = config.module - - url = build_path(module, "jobs/{job_id}", result) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["SUCCESS"], - ["RUNNING", "INIT"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_ecs_instance): error " - "waiting to be done, error= %s" % str(ex)) - - -def multi_invoke_delete_volume(config, opts, client, timeout): - module = config.module - - opts1 = None - expect = opts["data_volumes"] - current = opts["current_state"]["data_volumes"] - if expect and current: - v = [i["volume_id"] for i in expect] - opts1 = { - "data_volumes": [ - i for i in current if i["volume_id"] not in v - ] - } - - loop_val = navigate_value(opts1, ["data_volumes"]) - if not loop_val: - return - - for i in range(len(loop_val)): - r = send_delete_volume_request(module, None, client, loop_val[i]) - async_wait(config, r, client, timeout) - - -def multi_invoke_attach_data_disk(config, opts, client, timeout): - module = config.module - - opts1 = opts - expect = opts["data_volumes"] - current = opts["current_state"]["data_volumes"] - if expect and current: - v = [i["volume_id"] for i in current] - opts1 = { - "data_volumes": [ - i for i in expect if i["volume_id"] not in v - ] - } - - loop_val = navigate_value(opts1, ["data_volumes"]) - if not loop_val: - return - - for i in range(len(loop_val)): - params = build_attach_data_disk_parameters(opts1, {"data_volumes": i}) - r = send_attach_data_disk_request(module, params, client) - async_wait(config, r, client, timeout) - - -def send_read_request(module, client): - url = build_path(module, "cloudservers/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(read), error: %s" % 
str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["server"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") - - result["OS-EXT-AZ:availability_zone"] = body.get( - "OS-EXT-AZ:availability_zone") - - result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") - - result["OS-EXT-SRV-ATTR:instance_name"] = body.get( - "OS-EXT-SRV-ATTR:instance_name") - - result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") - - result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") - - v = fill_read_resp_address(body.get("address")) - result["address"] = v - - result["config_drive"] = body.get("config_drive") - - result["created"] = body.get("created") - - result["description"] = body.get("description") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - v = fill_read_resp_flavor(body.get("flavor")) - result["flavor"] = v - - result["id"] = body.get("id") - - v = fill_read_resp_image(body.get("image")) - result["image"] = v - - result["key_name"] = body.get("key_name") - - v = fill_read_resp_metadata(body.get("metadata")) - result["metadata"] = v - - result["name"] = body.get("name") - - v = fill_read_resp_os_extended_volumes_volumes_attached( - body.get("os-extended-volumes:volumes_attached")) - result["os-extended-volumes:volumes_attached"] = v - - v = fill_read_resp_root_volume(body.get("root_volume")) - result["root_volume"] = v - - result["status"] = body.get("status") - - result["tags"] = body.get("tags") - - return result - - -def fill_read_resp_address(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id") - - val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type") - - val["addr"] = item.get("addr") - - result.append(val) - - return result - - -def fill_read_resp_flavor(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def fill_read_resp_image(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def fill_read_resp_metadata(value): - if not value: - return None - - result = dict() - - result["image_name"] = value.get("image_name") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def fill_read_resp_os_extended_volumes_volumes_attached(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["bootIndex"] = item.get("bootIndex") - - val["device"] = item.get("device") - - val["id"] = item.get("id") - - result.append(val) - - return result - - -def fill_read_resp_root_volume(value): - if not value: - return None - - result = dict() - - result["device"] = value.get("device") - - result["id"] = value.get("id") - - return result - - -def send_read_auto_recovery_request(module, client): - url = build_path(module, "cloudservers/{id}/autorecovery") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(read_auto_recovery), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def fill_read_auto_recovery_resp_body(body): - result = dict() - - result["support_auto_recovery"] = body.get("support_auto_recovery") - - return result - - -def flatten_options(response, array_index): - r = dict() - - v = navigate_value( - response, ["read", "OS-EXT-AZ:availability_zone"], array_index) - 
r["availability_zone"] = v - - v = navigate_value(response, ["read", "config_drive"], array_index) - r["config_drive"] = v - - v = navigate_value(response, ["read", "created"], array_index) - r["created"] = v - - v = flatten_data_volumes(response, array_index) - r["data_volumes"] = v - - v = navigate_value(response, ["read", "description"], array_index) - r["description"] = v - - v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index) - r["disk_config_type"] = v - - v = flatten_enable_auto_recovery(response, array_index) - r["enable_auto_recovery"] = v - - v = navigate_value( - response, ["read", "enterprise_project_id"], array_index) - r["enterprise_project_id"] = v - - v = navigate_value(response, ["read", "flavor", "id"], array_index) - r["flavor_name"] = v - - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index) - r["host_name"] = v - - v = navigate_value(response, ["read", "image", "id"], array_index) - r["image_id"] = v - - v = navigate_value( - response, ["read", "metadata", "image_name"], array_index) - r["image_name"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = flatten_nics(response, array_index) - r["nics"] = v - - v = navigate_value( - response, ["read", "OS-EXT-STS:power_state"], array_index) - r["power_state"] = v - - v = flatten_root_volume(response, array_index) - r["root_volume"] = v - - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index) - r["server_alias"] = v - - v = flatten_server_tags(response, array_index) - r["server_tags"] = v - - v = navigate_value(response, ["read", "key_name"], array_index) - r["ssh_key_name"] = v - - v = navigate_value(response, ["read", "status"], array_index) - r["status"] = v - - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index) - r["user_data"] = v - - v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index) - r["vpc_id"] = v - - return r - - -def flatten_data_volumes(d, array_index): - v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"], - array_index) - if not v: - return None - n = len(v) - result = [] - - new_ai = dict() - if array_index: - new_ai.update(array_index) - - for i in range(n): - new_ai["read.os-extended-volumes:volumes_attached"] = i - - val = dict() - - v = navigate_value( - d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai) - val["device"] = v - - v = navigate_value( - d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai) - val["volume_id"] = v - - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if result else None - - -def flatten_enable_auto_recovery(d, array_index): - v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"], - array_index) - return v == "true" - - -def flatten_nics(d, array_index): - v = navigate_value(d, ["read", "address"], - array_index) - if not v: - return None - n = len(v) - result = [] - - new_ai = dict() - if array_index: - new_ai.update(array_index) - - for i in range(n): - new_ai["read.address"] = i - - val = dict() - - v = navigate_value(d, ["read", "address", "addr"], new_ai) - val["ip_address"] = v - - v = navigate_value( - d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai) - val["port_id"] = v - - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if result else None - - -def flatten_root_volume(d, array_index): - result = dict() - - v = navigate_value(d, ["read", 
"root_volume", "device"], array_index) - result["device"] = v - - v = navigate_value(d, ["read", "root_volume", "id"], array_index) - result["volume_id"] = v - - for v in result.values(): - if v is not None: - return result - return None - - -def flatten_server_tags(d, array_index): - v = navigate_value(d, ["read", "tags"], array_index) - if not v: - return None - - r = dict() - for item in v: - v1 = item.split("=") - if v1: - r[v1[0]] = v1[1] - return r - - -def adjust_options(opts, states): - adjust_data_volumes(opts, states) - - adjust_nics(opts, states) - - -def adjust_data_volumes(parent_input, parent_cur): - iv = parent_input.get("data_volumes") - if not (iv and isinstance(iv, list)): - return - - cv = parent_cur.get("data_volumes") - if not (cv and isinstance(cv, list)): - return - - lcv = len(cv) - result = [] - q = [] - for iiv in iv: - if len(q) == lcv: - break - - icv = None - for j in range(lcv): - if j in q: - continue - - icv = cv[j] - - if iiv["volume_id"] != icv["volume_id"]: - continue - - result.append(icv) - q.append(j) - break - else: - break - - if len(q) != lcv: - for i in range(lcv): - if i not in q: - result.append(cv[i]) - - if len(result) != lcv: - raise Exception("adjust property(data_volumes) failed, " - "the array number is not equal") - - parent_cur["data_volumes"] = result - - -def adjust_nics(parent_input, parent_cur): - iv = parent_input.get("nics") - if not (iv and isinstance(iv, list)): - return - - cv = parent_cur.get("nics") - if not (cv and isinstance(cv, list)): - return - - lcv = len(cv) - result = [] - q = [] - for iiv in iv: - if len(q) == lcv: - break - - icv = None - for j in range(lcv): - if j in q: - continue - - icv = cv[j] - - if iiv["ip_address"] != icv["ip_address"]: - continue - - result.append(icv) - q.append(j) - break - else: - break - - if len(q) != lcv: - for i in range(lcv): - if i not in q: - result.append(cv[i]) - - if len(result) != lcv: - raise Exception("adjust property(nics) failed, " - "the array number is not equal") - - parent_cur["nics"] = result - - -def set_unreadable_options(opts, states): - states["admin_pass"] = opts.get("admin_pass") - - states["eip_id"] = opts.get("eip_id") - - set_unread_nics( - opts.get("nics"), states.get("nics")) - - set_unread_root_volume( - opts.get("root_volume"), states.get("root_volume")) - - states["security_groups"] = opts.get("security_groups") - - states["server_metadata"] = opts.get("server_metadata") - - -def set_unread_nics(inputv, curv): - if not (inputv and isinstance(inputv, list)): - return - - if not (curv and isinstance(curv, list)): - return - - lcv = len(curv) - q = [] - for iv in inputv: - if len(q) == lcv: - break - - cv = None - for j in range(lcv): - if j in q: - continue - - cv = curv[j] - - if iv["ip_address"] != cv["ip_address"]: - continue - - q.append(j) - break - else: - continue - - cv["subnet_id"] = iv.get("subnet_id") - - -def set_unread_root_volume(inputv, curv): - if not (inputv and isinstance(inputv, dict)): - return - - if not (curv and isinstance(curv, dict)): - return - - curv["size"] = inputv.get("size") - - curv["snapshot_id"] = inputv.get("snapshot_id") - - curv["volume_type"] = inputv.get("volume_type") - - -def set_readonly_options(opts, states): - opts["config_drive"] = states.get("config_drive") - - opts["created"] = states.get("created") - - opts["disk_config_type"] = states.get("disk_config_type") - - opts["host_name"] = states.get("host_name") - - opts["image_name"] = states.get("image_name") - - set_readonly_nics( - opts.get("nics"), 
states.get("nics")) - - opts["power_state"] = states.get("power_state") - - set_readonly_root_volume( - opts.get("root_volume"), states.get("root_volume")) - - opts["server_alias"] = states.get("server_alias") - - opts["status"] = states.get("status") - - -def set_readonly_nics(inputv, curv): - if not (curv and isinstance(curv, list)): - return - - if not (inputv and isinstance(inputv, list)): - return - - lcv = len(curv) - q = [] - for iv in inputv: - if len(q) == lcv: - break - - cv = None - for j in range(lcv): - if j in q: - continue - - cv = curv[j] - - if iv["ip_address"] != cv["ip_address"]: - continue - - q.append(j) - break - else: - continue - - iv["port_id"] = cv.get("port_id") - - -def set_readonly_root_volume(inputv, curv): - if not (inputv and isinstance(inputv, dict)): - return - - if not (curv and isinstance(curv, dict)): - return - - inputv["device"] = curv.get("device") - - inputv["volume_id"] = curv.get("volume_id") - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_ecs_instance): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["servers"], None) - - -def _build_identity_object(all_opts): - result = dict() - - result["OS-DCF:diskConfig"] = None - - v = navigate_value(all_opts, ["availability_zone"], None) - result["OS-EXT-AZ:availability_zone"] = v - - result["OS-EXT-SRV-ATTR:hostname"] = None - - result["OS-EXT-SRV-ATTR:instance_name"] = None - - v = navigate_value(all_opts, ["user_data"], None) - result["OS-EXT-SRV-ATTR:user_data"] = v - - result["OS-EXT-STS:power_state"] = None - - result["config_drive"] = None - - result["created"] = None - - v = navigate_value(all_opts, ["description"], None) - result["description"] = v - - v = navigate_value(all_opts, ["enterprise_project_id"], None) - result["enterprise_project_id"] = v - - v = expand_list_flavor(all_opts, None) - result["flavor"] = v - - result["id"] = None - - v = expand_list_image(all_opts, None) - result["image"] = v - - v = navigate_value(all_opts, ["ssh_key_name"], None) - result["key_name"] = v - - v = expand_list_metadata(all_opts, None) - result["metadata"] = v - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - result["status"] = None - - v = expand_list_tags(all_opts, None) - result["tags"] = v - - return result - - -def expand_list_flavor(d, array_index): - r = dict() - - v = navigate_value(d, ["flavor_name"], array_index) - r["id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_image(d, array_index): - r = dict() - - v = navigate_value(d, ["image_id"], array_index) - r["id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_metadata(d, array_index): - r = dict() - - v = navigate_value(d, ["vpc_id"], array_index) - r["vpc_id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_tags(d, array_index): - v = d.get("server_tags") - if not v: - return None - - return [k + "=" + v1 for k, v1 in v.items()] - - -def fill_list_resp_body(body): - result = dict() - - result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") - - result["OS-EXT-AZ:availability_zone"] = body.get( - "OS-EXT-AZ:availability_zone") - - result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") - - result["OS-EXT-SRV-ATTR:instance_name"] = body.get( - "OS-EXT-SRV-ATTR:instance_name") - - 
result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") - - result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") - - result["config_drive"] = body.get("config_drive") - - result["created"] = body.get("created") - - result["description"] = body.get("description") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - v = fill_list_resp_flavor(body.get("flavor")) - result["flavor"] = v - - result["id"] = body.get("id") - - v = fill_list_resp_image(body.get("image")) - result["image"] = v - - result["key_name"] = body.get("key_name") - - v = fill_list_resp_metadata(body.get("metadata")) - result["metadata"] = v - - result["name"] = body.get("name") - - result["status"] = body.get("status") - - result["tags"] = body.get("tags") - - return result - - -def fill_list_resp_flavor(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def fill_list_resp_image(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def fill_list_resp_metadata(value): - if not value: - return None - - result = dict() - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def adjust_list_resp(opts, resp): - adjust_list_api_tags(opts, resp) - - -def adjust_list_api_tags(parent_input, parent_cur): - iv = parent_input.get("tags") - if not (iv and isinstance(iv, list)): - return - - cv = parent_cur.get("tags") - if not (cv and isinstance(cv, list)): - return - - result = [] - for iiv in iv: - if iiv not in cv: - break - - result.append(iiv) - - j = cv.index(iiv) - cv[j] = cv[-1] - cv.pop() - - if cv: - result.extend(cv) - parent_cur["tags"] = result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py deleted file mode 100644 index 4aec1b94..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py +++ /dev/null @@ -1,1210 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_evs_disk -description: - - block storage management. -short_description: Creates a resource of Evs/Disk in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huaweicloud Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '30m' - update: - description: - - The timeouts for update operation. - type: str - default: '30m' - delete: - description: - - The timeouts for delete operation. - type: str - default: '30m' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - required: true - name: - description: - - Specifies the disk name. 
The value can contain a maximum of 255 - bytes. - type: str - required: true - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - required: true - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - required: false - description: - description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. - type: str - required: false - enable_full_clone: - description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - required: false - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - required: false - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - required: false - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. - type: str - required: false - enterprise_project_id: - description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. - type: str - required: false - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - required: false - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - required: false - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# test create disk -- name: Create a disk - community.general.hwc_evs_disk: - availability_zone: "cn-north-1a" - name: "ansible_evs_disk_test" - volume_type: "SATA" - size: 10 -''' - -RETURN = ''' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. 
- type: str - returned: success - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - returned: success - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - returned: success - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - returned: success - description: - description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. - type: str - returned: success - enable_full_clone: - description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - returned: success - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - returned: success - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - returned: success - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. - type: str - returned: success - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - returned: success - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - returned: success - attachments: - description: - - Specifies the disk attachment information. - type: complex - returned: success - contains: - attached_at: - description: - - Specifies the time when the disk was attached. Time - format is 'UTC YYYY-MM-DDTHH:MM:SS'. 
- type: str - returned: success - attachment_id: - description: - - Specifies the ID of the attachment information. - type: str - returned: success - device: - description: - - Specifies the device name. - type: str - returned: success - server_id: - description: - - Specifies the ID of the server to which the disk is - attached. - type: str - returned: success - backup_policy_id: - description: - - Specifies the backup policy ID. - type: str - returned: success - created_at: - description: - - Specifies the time when the disk was created. Time format is 'UTC - YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - is_bootable: - description: - - Specifies whether the disk is bootable. - type: bool - returned: success - is_readonly: - description: - - Specifies whether the disk is read-only or read/write. True - indicates that the disk is read-only. False indicates that the - disk is read/write. - type: bool - returned: success - source_volume_id: - description: - - Specifies the source disk ID. This parameter has a value if the - disk is created from a source disk. - type: str - returned: success - status: - description: - - Specifies the disk status. - type: str - returned: success - tags: - description: - - Specifies the disk tags. - type: dict - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='30m', type='str'), - update=dict(default='30m', type='str'), - delete=dict(default='30m', type='str'), - ), default=dict()), - availability_zone=dict(type='str', required=True), - name=dict(type='str', required=True), - volume_type=dict(type='str', required=True), - backup_id=dict(type='str'), - description=dict(type='str'), - enable_full_clone=dict(type='bool'), - enable_scsi=dict(type='bool'), - enable_share=dict(type='bool'), - encryption_id=dict(type='str'), - enterprise_project_id=dict(type='str'), - image_id=dict(type='str'), - size=dict(type='int'), - snapshot_id=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "evs") - - try: - _init(config) - is_exist = module.params.get('id') - - result = None - changed = False - if module.params['state'] == 'present': - if not is_exist: - if not module.check_mode: - create(config) - changed = True - - inputv = user_input_parameters(module) - resp, array_index = read_resource(config) - result = build_state(inputv, resp, array_index) - set_readonly_options(inputv, result) - if are_different_dicts(inputv, result): - if not module.check_mode: - update(config, inputv, result) - - inputv = user_input_parameters(module) - resp, array_index = read_resource(config) - result = build_state(inputv, resp, array_index) - set_readonly_options(inputv, result) - if are_different_dicts(inputv, result): - raise Exception("Update resource failed, " - "some attributes are not updated") - - changed = True - - result['id'] = module.params.get('id') - else: - result = dict() - if is_exist: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - 
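The main() above is the converge loop shared by the generated hwc_* modules in this diff: resolve the resource id, create when absent, read the live state back, diff it against the user's input, update until the two agree, and delete on state=absent. A condensed sketch of that flow, reusing this module's own helpers (user_input_parameters, read_resource, build_state, set_readonly_options, are_different_dicts) and eliding the exception wrapping, might look like the following; the converge() name is only for illustration and is not part of the deleted file:

def converge(module, config):
    # Reduced from main() above: check-mode guards kept, error handling elided.
    result, changed = dict(), False
    exists = bool(module.params.get('id'))      # resolved earlier by _init()

    if module.params['state'] == 'present':
        if not exists:
            if not module.check_mode:
                create(config)                  # POST, then poll the async job
            changed = True
        inputv = user_input_parameters(module)
        resp, idx = read_resource(config)       # GET the live resource
        result = build_state(inputv, resp, idx)
        set_readonly_options(inputv, result)    # copy server-managed fields
        if are_different_dicts(inputv, result): # drift between desired and live
            if not module.check_mode:
                update(config, inputv, result)  # PUT, then re-read and verify
            changed = True
        result['id'] = module.params.get('id')
    elif exists:
        if not module.check_mode:
            delete(config)                      # DELETE, then poll the job
        changed = True

    result['changed'] = changed
    module.exit_json(**result)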
- -def _init(config): - module = config.module - if module.params.get('id'): - return - - v = search_resource(config) - n = len(v) - if n > 1: - raise Exception("find more than one resources(%s)" % ", ".join([ - navigate_value(i, ["id"]) - for i in v - ])) - - if n == 1: - module.params['id'] = navigate_value(v[0], ["id"]) - - -def user_input_parameters(module): - return { - "availability_zone": module.params.get("availability_zone"), - "backup_id": module.params.get("backup_id"), - "description": module.params.get("description"), - "enable_full_clone": module.params.get("enable_full_clone"), - "enable_scsi": module.params.get("enable_scsi"), - "enable_share": module.params.get("enable_share"), - "encryption_id": module.params.get("encryption_id"), - "enterprise_project_id": module.params.get("enterprise_project_id"), - "image_id": module.params.get("image_id"), - "name": module.params.get("name"), - "size": module.params.get("size"), - "snapshot_id": module.params.get("snapshot_id"), - "volume_type": module.params.get("volume_type"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "volumev3", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - opts["ansible_module"] = module - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - - client1 = config.client(get_region(module), "volume", "project") - client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") - obj = async_wait(config, r, client1, timeout) - module.params['id'] = navigate_value(obj, ["entities", "volume_id"]) - - -def update(config, expect_state, current_state): - module = config.module - expect_state["current_state"] = current_state - current_state["current_state"] = current_state - client = config.client(get_region(module), "evs", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) - - params = build_update_parameters(expect_state) - params1 = build_update_parameters(current_state) - if params and are_different_dicts(params, params1): - send_update_request(module, params, client) - - params = build_extend_disk_parameters(expect_state) - params1 = build_extend_disk_parameters(current_state) - if params and are_different_dicts(params, params1): - client1 = config.client(get_region(module), "evsv2.1", "project") - r = send_extend_disk_request(module, params, client1) - - client1 = config.client(get_region(module), "volume", "project") - client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") - async_wait(config, r, client1, timeout) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "evs", "project") - timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) - - r = send_delete_request(module, None, client) - - client = config.client(get_region(module), "volume", "project") - client.endpoint = client.endpoint.replace("/v2/", "/v1/") - async_wait(config, r, client, timeout) - - -def read_resource(config): - module = config.module - client = config.client(get_region(module), "volumev3", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return res, None - - -def build_state(opts, response, array_index): - states = flatten_options(response, array_index) - set_unreadable_options(opts, states) - return states - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["enable_share"]) - if v or v in [False, 
0]: - query_params.append( - "multiattach=" + (str(v) if v else str(v).lower())) - - v = navigate_value(opts, ["name"]) - if v or v in [False, 0]: - query_params.append( - "name=" + (str(v) if v else str(v).lower())) - - v = navigate_value(opts, ["availability_zone"]) - if v or v in [False, 0]: - query_params.append( - "availability_zone=" + (str(v) if v else str(v).lower())) - - query_link = "?limit=10&offset={start}" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "volumev3", "project") - opts = user_input_parameters(module) - name = module.params.get("name") - query_link = _build_query_link(opts) - link = "os-vendor-volumes/detail" + query_link - - result = [] - p = {'start': 0} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - if name == item.get("name"): - result.append(item) - - if len(result) > 1: - break - - p['start'] += len(r) - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["availability_zone"], None) - if not is_empty_value(v): - params["availability_zone"] = v - - v = navigate_value(opts, ["backup_id"], None) - if not is_empty_value(v): - params["backup_id"] = v - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = navigate_value(opts, ["enterprise_project_id"], None) - if not is_empty_value(v): - params["enterprise_project_id"] = v - - v = navigate_value(opts, ["image_id"], None) - if not is_empty_value(v): - params["imageRef"] = v - - v = expand_create_metadata(opts, None) - if not is_empty_value(v): - params["metadata"] = v - - v = navigate_value(opts, ["enable_share"], None) - if not is_empty_value(v): - params["multiattach"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = navigate_value(opts, ["size"], None) - if not is_empty_value(v): - params["size"] = v - - v = navigate_value(opts, ["snapshot_id"], None) - if not is_empty_value(v): - params["snapshot_id"] = v - - v = navigate_value(opts, ["volume_type"], None) - if not is_empty_value(v): - params["volume_type"] = v - - if not params: - return params - - params = {"volume": params} - - return params - - -def expand_create_metadata(d, array_index): - r = dict() - - v = navigate_value(d, ["encryption_id"], array_index) - if not is_empty_value(v): - r["__system__cmkid"] = v - - v = expand_create_metadata_system_encrypted(d, array_index) - if not is_empty_value(v): - r["__system__encrypted"] = v - - v = expand_create_metadata_full_clone(d, array_index) - if not is_empty_value(v): - r["full_clone"] = v - - v = expand_create_metadata_hw_passthrough(d, array_index) - if not is_empty_value(v): - r["hw:passthrough"] = v - - return r - - -def expand_create_metadata_system_encrypted(d, array_index): - v = navigate_value(d, ["encryption_id"], array_index) - return "1" if v else "" - - -def expand_create_metadata_full_clone(d, array_index): - v = navigate_value(d, ["enable_full_clone"], array_index) - return "0" if v else "" - - -def expand_create_metadata_hw_passthrough(d, array_index): - v = navigate_value(d, ["enable_scsi"], array_index) - if v is None: - return v - return "true" if v else "false" - - -def send_create_request(module, params, client): - url = "cloudvolumes" - try: - r = client.post(url, params) - except HwcClientException as 
ex: - msg = ("module(hwc_evs_disk): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_update_parameters(opts): - params = dict() - - v = navigate_value(opts, ["description"], None) - if v is not None: - params["description"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - if not params: - return params - - params = {"volume": params} - - return params - - -def send_update_request(module, params, client): - url = build_path(module, "cloudvolumes/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "cloudvolumes/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def build_extend_disk_parameters(opts): - params = dict() - - v = expand_extend_disk_os_extend(opts, None) - if not is_empty_value(v): - params["os-extend"] = v - - return params - - -def expand_extend_disk_os_extend(d, array_index): - r = dict() - - v = navigate_value(d, ["size"], array_index) - if not is_empty_value(v): - r["new_size"] = v - - return r - - -def send_extend_disk_request(module, params, client): - url = build_path(module, "cloudvolumes/{id}/action") - - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(extend_disk), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait(config, result, client, timeout): - module = config.module - - path_parameters = { - "job_id": ["job_id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "jobs/{job_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["SUCCESS"], - ["RUNNING", "INIT"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_evs_disk): error " - "waiting to be done, error= %s" % str(ex)) - - -def send_read_request(module, client): - url = build_path(module, "os-vendor-volumes/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["volume"], None) - - -def fill_read_resp_body(body): - result = dict() - - v = fill_read_resp_attachments(body.get("attachments")) - result["attachments"] = v - - result["availability_zone"] = body.get("availability_zone") - - result["bootable"] = body.get("bootable") - - result["created_at"] = body.get("created_at") - - result["description"] = body.get("description") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - v = fill_read_resp_metadata(body.get("metadata")) - result["metadata"] = v - - result["multiattach"] = body.get("multiattach") - - result["name"] = body.get("name") - - result["size"] = body.get("size") - - result["snapshot_id"] = body.get("snapshot_id") - - 
result["source_volid"] = body.get("source_volid") - - result["status"] = body.get("status") - - result["tags"] = body.get("tags") - - v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata")) - result["volume_image_metadata"] = v - - result["volume_type"] = body.get("volume_type") - - return result - - -def fill_read_resp_attachments(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["attached_at"] = item.get("attached_at") - - val["attachment_id"] = item.get("attachment_id") - - val["device"] = item.get("device") - - val["server_id"] = item.get("server_id") - - result.append(val) - - return result - - -def fill_read_resp_metadata(value): - if not value: - return None - - result = dict() - - result["__system__cmkid"] = value.get("__system__cmkid") - - result["attached_mode"] = value.get("attached_mode") - - result["full_clone"] = value.get("full_clone") - - result["hw:passthrough"] = value.get("hw:passthrough") - - result["policy"] = value.get("policy") - - result["readonly"] = value.get("readonly") - - return result - - -def fill_read_resp_volume_image_metadata(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -def flatten_options(response, array_index): - r = dict() - - v = flatten_attachments(response, array_index) - r["attachments"] = v - - v = navigate_value(response, ["read", "availability_zone"], array_index) - r["availability_zone"] = v - - v = navigate_value(response, ["read", "metadata", "policy"], array_index) - r["backup_policy_id"] = v - - v = navigate_value(response, ["read", "created_at"], array_index) - r["created_at"] = v - - v = navigate_value(response, ["read", "description"], array_index) - r["description"] = v - - v = flatten_enable_full_clone(response, array_index) - r["enable_full_clone"] = v - - v = flatten_enable_scsi(response, array_index) - r["enable_scsi"] = v - - v = navigate_value(response, ["read", "multiattach"], array_index) - r["enable_share"] = v - - v = navigate_value( - response, ["read", "metadata", "__system__cmkid"], array_index) - r["encryption_id"] = v - - v = navigate_value( - response, ["read", "enterprise_project_id"], array_index) - r["enterprise_project_id"] = v - - v = navigate_value( - response, ["read", "volume_image_metadata", "id"], array_index) - r["image_id"] = v - - v = flatten_is_bootable(response, array_index) - r["is_bootable"] = v - - v = flatten_is_readonly(response, array_index) - r["is_readonly"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = navigate_value(response, ["read", "size"], array_index) - r["size"] = v - - v = navigate_value(response, ["read", "snapshot_id"], array_index) - r["snapshot_id"] = v - - v = navigate_value(response, ["read", "source_volid"], array_index) - r["source_volume_id"] = v - - v = navigate_value(response, ["read", "status"], array_index) - r["status"] = v - - v = navigate_value(response, ["read", "tags"], array_index) - r["tags"] = v - - v = navigate_value(response, ["read", "volume_type"], array_index) - r["volume_type"] = v - - return r - - -def flatten_attachments(d, array_index): - v = navigate_value(d, ["read", "attachments"], - array_index) - if not v: - return None - n = len(v) - result = [] - - new_ai = dict() - if array_index: - new_ai.update(array_index) - - for i in range(n): - new_ai["read.attachments"] = i - - val = dict() - - v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai) - 
val["attached_at"] = v - - v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai) - val["attachment_id"] = v - - v = navigate_value(d, ["read", "attachments", "device"], new_ai) - val["device"] = v - - v = navigate_value(d, ["read", "attachments", "server_id"], new_ai) - val["server_id"] = v - - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if result else None - - -def flatten_enable_full_clone(d, array_index): - v = navigate_value(d, ["read", "metadata", "full_clone"], - array_index) - if v is None: - return v - return True if v == "0" else False - - -def flatten_enable_scsi(d, array_index): - v = navigate_value(d, ["read", "metadata", "hw:passthrough"], - array_index) - if v is None: - return v - return True if v in ["true", "True"] else False - - -def flatten_is_bootable(d, array_index): - v = navigate_value(d, ["read", "bootable"], array_index) - if v is None: - return v - return True if v in ["true", "True"] else False - - -def flatten_is_readonly(d, array_index): - v = navigate_value(d, ["read", "metadata", "readonly"], - array_index) - if v is None: - return v - return True if v in ["true", "True"] else False - - -def set_unreadable_options(opts, states): - states["backup_id"] = opts.get("backup_id") - - -def set_readonly_options(opts, states): - opts["attachments"] = states.get("attachments") - - opts["backup_policy_id"] = states.get("backup_policy_id") - - opts["created_at"] = states.get("created_at") - - opts["is_bootable"] = states.get("is_bootable") - - opts["is_readonly"] = states.get("is_readonly") - - opts["source_volume_id"] = states.get("source_volume_id") - - opts["status"] = states.get("status") - - opts["tags"] = states.get("tags") - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_evs_disk): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["volumes"], None) - - -def expand_list_metadata(d, array_index): - r = dict() - - v = navigate_value(d, ["encryption_id"], array_index) - r["__system__cmkid"] = v - - r["attached_mode"] = None - - v = navigate_value(d, ["enable_full_clone"], array_index) - r["full_clone"] = v - - v = navigate_value(d, ["enable_scsi"], array_index) - r["hw:passthrough"] = v - - r["policy"] = None - - r["readonly"] = None - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_volume_image_metadata(d, array_index): - r = dict() - - v = navigate_value(d, ["image_id"], array_index) - r["id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def fill_list_resp_body(body): - result = dict() - - v = fill_list_resp_attachments(body.get("attachments")) - result["attachments"] = v - - result["availability_zone"] = body.get("availability_zone") - - result["bootable"] = body.get("bootable") - - result["created_at"] = body.get("created_at") - - result["description"] = body.get("description") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - v = fill_list_resp_metadata(body.get("metadata")) - result["metadata"] = v - - result["multiattach"] = body.get("multiattach") - - result["name"] = body.get("name") - - result["size"] = body.get("size") - - result["snapshot_id"] = body.get("snapshot_id") - - result["source_volid"] = body.get("source_volid") - - result["status"] = body.get("status") - - result["tags"] = body.get("tags") - - v 
= fill_list_resp_volume_image_metadata(body.get("volume_image_metadata")) - result["volume_image_metadata"] = v - - result["volume_type"] = body.get("volume_type") - - return result - - -def fill_list_resp_attachments(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["attached_at"] = item.get("attached_at") - - val["attachment_id"] = item.get("attachment_id") - - val["device"] = item.get("device") - - val["server_id"] = item.get("server_id") - - result.append(val) - - return result - - -def fill_list_resp_metadata(value): - if not value: - return None - - result = dict() - - result["__system__cmkid"] = value.get("__system__cmkid") - - result["attached_mode"] = value.get("attached_mode") - - result["full_clone"] = value.get("full_clone") - - result["hw:passthrough"] = value.get("hw:passthrough") - - result["policy"] = value.get("policy") - - result["readonly"] = value.get("readonly") - - return result - - -def fill_list_resp_volume_image_metadata(value): - if not value: - return None - - result = dict() - - result["id"] = value.get("id") - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py deleted file mode 100644 index f53369ad..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py +++ /dev/null @@ -1,493 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2018 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_network_vpc -description: - - Represents a vpc resource. -short_description: Creates a Huawei Cloud VPC -author: Huawei Inc. (@huaweicloud) -requirements: - - requests >= 2.18.4 - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in vpc. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operation. - type: dict - suboptions: - create: - description: - - The timeout for create operation. - type: str - default: '15m' - update: - description: - - The timeout for update operation. - type: str - default: '15m' - delete: - description: - - The timeout for delete operation. - type: str - default: '15m' - name: - description: - - The name of vpc. - type: str - required: true - cidr: - description: - - The range of available subnets in the vpc. - type: str - required: true -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -- name: Create a vpc - community.general.hwc_network_vpc: - identity_endpoint: "{{ identity_endpoint }}" - user: "{{ user }}" - password: "{{ password }}" - domain: "{{ domain }}" - project: "{{ project }}" - region: "{{ region }}" - name: "vpc_1" - cidr: "192.168.100.0/24" - state: present -''' - -RETURN = ''' - id: - description: - - the id of vpc. - type: str - returned: success - name: - description: - - the name of vpc. - type: str - returned: success - cidr: - description: - - the range of available subnets in the vpc. 
- type: str - returned: success - status: - description: - - the status of vpc. - type: str - returned: success - routes: - description: - - the route information. - type: complex - returned: success - contains: - destination: - description: - - the destination network segment of a route. - type: str - returned: success - next_hop: - description: - - the next hop of a route. If the route type is peering, - it will provide VPC peering connection ID. - type: str - returned: success - enable_shared_snat: - description: - - show whether the shared snat is enabled. - type: bool - returned: success -''' - -############################################################################### -# Imports -############################################################################### - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, - HwcClientException404, HwcModule, - are_different_dicts, is_empty_value, - wait_to_finish, get_region, - build_path, navigate_value) -import re - -############################################################################### -# Main -############################################################################### - - -def main(): - """Main function""" - - module = HwcModule( - argument_spec=dict( - state=dict( - default='present', choices=['present', 'absent'], type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - update=dict(default='15m', type='str'), - delete=dict(default='15m', type='str'), - ), default=dict()), - name=dict(required=True, type='str'), - cidr=dict(required=True, type='str') - ), - supports_check_mode=True, - ) - config = Config(module, 'vpc') - - state = module.params['state'] - - if (not module.params.get("id")) and module.params.get("name"): - module.params['id'] = get_id_by_name(config) - - fetch = None - link = self_link(module) - # the link will include Nones if required format parameters are missed - if not re.search('/None/|/None$', link): - client = config.client(get_region(module), "vpc", "project") - fetch = fetch_resource(module, client, link) - if fetch: - fetch = fetch.get('vpc') - changed = False - - if fetch: - if state == 'present': - expect = _get_editable_properties(module) - current_state = response_to_hash(module, fetch) - current = {"cidr": current_state["cidr"]} - if are_different_dicts(expect, current): - if not module.check_mode: - fetch = update(config, self_link(module)) - fetch = response_to_hash(module, fetch.get('vpc')) - changed = True - else: - fetch = current_state - else: - if not module.check_mode: - delete(config, self_link(module)) - fetch = {} - changed = True - else: - if state == 'present': - if not module.check_mode: - fetch = create(config, "vpcs") - fetch = response_to_hash(module, fetch.get('vpc')) - changed = True - else: - fetch = {} - - fetch.update({'changed': changed}) - - module.exit_json(**fetch) - - -def create(config, link): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - r = None - try: - r = client.post(link, resource_to_create(module)) - except HwcClientException as ex: - msg = ("module(hwc_network_vpc): error creating " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - wait_done = wait_for_operation(config, 'create', r) - v = "" - try: - v = navigate_value(wait_done, ['vpc', 'id']) - except Exception as ex: - module.fail_json(msg=str(ex)) - - url = build_path(module, 'vpcs/{op_id}', {'op_id': v}) - return fetch_resource(module, 
client, url) - - -def update(config, link): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - r = None - try: - r = client.put(link, resource_to_update(module)) - except HwcClientException as ex: - msg = ("module(hwc_network_vpc): error updating " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - wait_for_operation(config, 'update', r) - - return fetch_resource(module, client, link) - - -def delete(config, link): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - try: - client.delete(link) - except HwcClientException as ex: - msg = ("module(hwc_network_vpc): error deleting " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - wait_for_delete(module, client, link) - - -def fetch_resource(module, client, link): - try: - return client.get(link) - except HwcClientException as ex: - msg = ("module(hwc_network_vpc): error fetching " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - -def get_id_by_name(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - name = module.params.get("name") - link = "vpcs" - query_link = "?marker={marker}&limit=10" - link += query_link - not_format_keys = re.findall("={marker}", link) - none_values = re.findall("=None", link) - - if not (not_format_keys or none_values): - r = None - try: - r = client.get(link) - except Exception: - pass - if r is None: - return None - r = r.get('vpcs', []) - ids = [ - i.get('id') for i in r if i.get('name', '') == name - ] - if not ids: - return None - elif len(ids) == 1: - return ids[0] - else: - module.fail_json( - msg="Multiple resources with same name are found.") - elif none_values: - module.fail_json( - msg="Can not find id by name because url includes None.") - else: - p = {'marker': ''} - ids = set() - while True: - r = None - try: - r = client.get(link.format(**p)) - except Exception: - pass - if r is None: - break - r = r.get('vpcs', []) - if r == []: - break - for i in r: - if i.get('name') == name: - ids.add(i.get('id')) - if len(ids) >= 2: - module.fail_json( - msg="Multiple resources with same name are found.") - - p['marker'] = r[-1].get('id') - - return ids.pop() if ids else None - - -def self_link(module): - return build_path(module, "vpcs/{id}") - - -def resource_to_create(module): - params = dict() - - v = module.params.get('cidr') - if not is_empty_value(v): - params["cidr"] = v - - v = module.params.get('name') - if not is_empty_value(v): - params["name"] = v - - if not params: - return params - - params = {"vpc": params} - - return params - - -def resource_to_update(module): - params = dict() - - v = module.params.get('cidr') - if not is_empty_value(v): - params["cidr"] = v - - if not params: - return params - - params = {"vpc": params} - - return params - - -def _get_editable_properties(module): - return { - "cidr": module.params.get("cidr"), - } - - -def response_to_hash(module, response): - """ Remove unnecessary properties from the response. - This is for doing comparisons with Ansible's current parameters. 
- """ - return { - u'id': response.get(u'id'), - u'name': response.get(u'name'), - u'cidr': response.get(u'cidr'), - u'status': response.get(u'status'), - u'routes': VpcRoutesArray( - response.get(u'routes', []), module).from_response(), - u'enable_shared_snat': response.get(u'enable_shared_snat') - } - - -def wait_for_operation(config, op_type, op_result): - module = config.module - op_id = "" - try: - op_id = navigate_value(op_result, ['vpc', 'id']) - except Exception as ex: - module.fail_json(msg=str(ex)) - - url = build_path(module, "vpcs/{op_id}", {'op_id': op_id}) - timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m')) - states = { - 'create': { - 'allowed': ['CREATING', 'DONW', 'OK'], - 'complete': ['OK'], - }, - 'update': { - 'allowed': ['PENDING_UPDATE', 'DONW', 'OK'], - 'complete': ['OK'], - } - } - - return wait_for_completion(url, timeout, states[op_type]['allowed'], - states[op_type]['complete'], config) - - -def wait_for_completion(op_uri, timeout, allowed_states, - complete_states, config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - def _refresh_status(): - r = None - try: - r = fetch_resource(module, client, op_uri) - except Exception: - return None, "" - - status = "" - try: - status = navigate_value(r, ['vpc', 'status']) - except Exception: - return None, "" - - return r, status - - try: - return wait_to_finish(complete_states, allowed_states, - _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def wait_for_delete(module, client, link): - - def _refresh_status(): - try: - client.get(link) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, "Pending" - - timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) - try: - return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -class VpcRoutesArray(object): - def __init__(self, request, module): - self.module = module - if request: - self.request = request - else: - self.request = [] - - def to_request(self): - items = [] - for item in self.request: - items.append(self._request_for_item(item)) - return items - - def from_response(self): - items = [] - for item in self.request: - items.append(self._response_from_item(item)) - return items - - def _request_for_item(self, item): - return { - u'destination': item.get('destination'), - u'nexthop': item.get('next_hop') - } - - def _response_from_item(self, item): - return { - u'destination': item.get(u'destination'), - u'next_hop': item.get(u'nexthop') - } - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py deleted file mode 100644 index f7fb4fae..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_smn_topic 
-description:
-    - Represents an SMN notification topic resource.
-short_description: Creates a resource of SMNTopic in Huawei Cloud
-author: Huawei Inc. (@huaweicloud)
-requirements:
-    - requests >= 2.18.4
-    - keystoneauth1 >= 3.6.0
-options:
-    state:
-        description:
-            - Whether the given object should exist in Huawei Cloud.
-        type: str
-        choices: ['present', 'absent']
-        default: 'present'
-    display_name:
-        description:
-            - Topic display name, which is presented as the name of the email
-              sender in an email message. The topic display name contains a
-              maximum of 192 bytes.
-        type: str
-        required: false
-    name:
-        description:
-            - Name of the topic to be created. The topic name is a string of 1
-              to 256 characters. It must contain upper- or lower-case letters,
-              digits, hyphens (-), and underscores C(_), and must start with a
-              letter or digit.
-        type: str
-        required: true
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-- name: Create an SMN topic
-  community.general.hwc_smn_topic:
-    identity_endpoint: "{{ identity_endpoint }}"
-    user_name: "{{ user_name }}"
-    password: "{{ password }}"
-    domain_name: "{{ domain_name }}"
-    project_name: "{{ project_name }}"
-    region: "{{ region }}"
-    name: "ansible_smn_topic_test"
-    state: present
-'''
-
-RETURN = '''
-create_time:
-    description:
-        - Time when the topic was created.
-    returned: success
-    type: str
-display_name:
-    description:
-        - Topic display name, which is presented as the name of the email
-          sender in an email message. The topic display name contains a
-          maximum of 192 bytes.
-    returned: success
-    type: str
-name:
-    description:
-        - Name of the topic to be created. The topic name is a string of 1
-          to 256 characters. It must contain upper- or lower-case letters,
-          digits, hyphens (-), and underscores C(_), and must start with a
-          letter or digit.
-    returned: success
-    type: str
-push_policy:
-    description:
-        - Message pushing policy. 0 indicates that the message sending
-          fails and the message is cached in the queue. 1 indicates that
-          the failed message is discarded.
-    returned: success
-    type: int
-topic_urn:
-    description:
-        - Resource identifier of a topic, which is unique.
-    returned: success
-    type: str
-update_time:
-    description:
-        - Time when the topic was updated.
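    # create_time and update_time are reported by the service; they are
    # output-only and never sent in create or update requests (see
    # create_resource_opts()/update_resource_opts() below).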
- returned: success - type: str -''' - -############################################################################### -# Imports -############################################################################### - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, - HwcModule, navigate_value, - are_different_dicts, is_empty_value, - build_path, get_region) -import re - -############################################################################### -# Main -############################################################################### - - -def main(): - """Main function""" - - module = HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - display_name=dict(type='str'), - name=dict(required=True, type='str') - ), - supports_check_mode=True, - ) - - config = Config(module, "smn") - - state = module.params['state'] - - if not module.params.get("id"): - module.params['id'] = get_resource_id(config) - - fetch = None - link = self_link(module) - # the link will include Nones if required format parameters are missed - if not re.search('/None/|/None$', link): - client = config.client(get_region(module), "smn", "project") - fetch = fetch_resource(module, client, link) - changed = False - - if fetch: - if state == 'present': - expect = _get_resource_editable_properties(module) - current_state = response_to_hash(module, fetch) - current = {'display_name': current_state['display_name']} - if are_different_dicts(expect, current): - if not module.check_mode: - fetch = update(config) - fetch = response_to_hash(module, fetch) - changed = True - else: - fetch = current_state - else: - if not module.check_mode: - delete(config) - fetch = {} - changed = True - else: - if state == 'present': - if not module.check_mode: - fetch = create(config) - fetch = response_to_hash(module, fetch) - changed = True - else: - fetch = {} - - fetch.update({'changed': changed}) - - module.exit_json(**fetch) - - -def create(config): - module = config.module - client = config.client(get_region(module), "smn", "project") - - link = "notifications/topics" - r = None - try: - r = client.post(link, create_resource_opts(module)) - except HwcClientException as ex: - msg = ("module(hwc_smn_topic): error creating " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - return get_resource(config, r) - - -def update(config): - module = config.module - client = config.client(get_region(module), "smn", "project") - - link = self_link(module) - try: - client.put(link, update_resource_opts(module)) - except HwcClientException as ex: - msg = ("module(hwc_smn_topic): error updating " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - return fetch_resource(module, client, link) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "smn", "project") - - link = self_link(module) - try: - client.delete(link) - except HwcClientException as ex: - msg = ("module(hwc_smn_topic): error deleting " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - -def fetch_resource(module, client, link): - try: - return client.get(link) - except HwcClientException as ex: - msg = ("module(hwc_smn_topic): error fetching " - "resource, error: %s" % str(ex)) - module.fail_json(msg=msg) - - -def get_resource(config, result): - module = config.module - client = config.client(get_region(module), "smn", "project") - - v = "" - try: - v = navigate_value(result, ['topic_urn']) 
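        # navigate_value() (from hwc_utils) walks a nested dict along the
        # given key path and raises when the path is missing, which is why
        # the call sits inside this try block. A sketch of the behaviour
        # relied on here (value hypothetical):
        #   navigate_value({'topic_urn': 'urn:smn:...'}, ['topic_urn'])
        #   -> 'urn:smn:...'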
- except Exception as ex: - module.fail_json(msg=str(ex)) - - d = {'topic_urn': v} - url = build_path(module, 'notifications/topics/{topic_urn}', d) - - return fetch_resource(module, client, url) - - -def get_resource_id(config): - module = config.module - client = config.client(get_region(module), "smn", "project") - - link = "notifications/topics" - query_link = "?offset={offset}&limit=10" - link += query_link - - p = {'offset': 0} - v = module.params.get('name') - ids = set() - while True: - r = None - try: - r = client.get(link.format(**p)) - except Exception: - pass - if r is None: - break - r = r.get('topics', []) - if r == []: - break - for i in r: - if i.get('name') == v: - ids.add(i.get('topic_urn')) - if len(ids) >= 2: - module.fail_json(msg="Multiple resources are found") - - p['offset'] += 1 - - return ids.pop() if ids else None - - -def self_link(module): - return build_path(module, "notifications/topics/{id}") - - -def create_resource_opts(module): - params = dict() - - v = module.params.get('display_name') - if not is_empty_value(v): - params["display_name"] = v - - v = module.params.get('name') - if not is_empty_value(v): - params["name"] = v - - return params - - -def update_resource_opts(module): - params = dict() - - v = module.params.get('display_name') - if not is_empty_value(v): - params["display_name"] = v - - return params - - -def _get_resource_editable_properties(module): - return { - "display_name": module.params.get("display_name"), - } - - -def response_to_hash(module, response): - """Remove unnecessary properties from the response. - This is for doing comparisons with Ansible's current parameters. - """ - return { - u'create_time': response.get(u'create_time'), - u'display_name': response.get(u'display_name'), - u'name': response.get(u'name'), - u'push_policy': _push_policy_convert_from_response( - response.get('push_policy')), - u'topic_urn': response.get(u'topic_urn'), - u'update_time': response.get(u'update_time') - } - - -def _push_policy_convert_from_response(value): - return { - 0: "the message sending fails and is cached in the queue", - 1: "the failed message is discarded", - }.get(int(value)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py deleted file mode 100644 index b53395f8..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py +++ /dev/null @@ -1,877 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_eip -description: - - elastic ip management. -short_description: Creates a resource of Vpc/EIP in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. 
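        # Timeout values use the '<minutes>m' form (for example '5m'); the
        # module strips the trailing 'm' and multiplies by 60 to turn the
        # value into seconds before polling.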
- type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '5m' - update: - description: - - The timeouts for update operation. - type: str - default: '5m' - type: - description: - - Specifies the EIP type. - type: str - required: true - dedicated_bandwidth: - description: - - Specifies the dedicated bandwidth object. - type: dict - required: false - suboptions: - charge_mode: - description: - - Specifies whether the bandwidth is billed by traffic or - by bandwidth size. The value can be bandwidth or traffic. - If this parameter is left blank or is null character - string, default value bandwidth is used. For IPv6 - addresses, the default parameter value is bandwidth - outside China and is traffic in China. - type: str - required: true - name: - description: - - Specifies the bandwidth name. The value is a string of 1 - to 64 characters that can contain letters, digits, - underscores C(_), hyphens (-), and periods (.). - type: str - required: true - size: - description: - - Specifies the bandwidth size. The value ranges from 1 - Mbit/s to 2000 Mbit/s by default. (The specific range may - vary depending on the configuration in each region. You - can see the bandwidth range of each region on the - management console.) The minimum unit for bandwidth - adjustment varies depending on the bandwidth range. The - details are as follows. - - The minimum unit is 1 Mbit/s if the allowed bandwidth - size ranges from 0 to 300 Mbit/s (with 300 Mbit/s - included). - - The minimum unit is 50 Mbit/s if the allowed bandwidth - size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s - included). - - The minimum unit is 500 Mbit/s if the allowed bandwidth - size is greater than 1000 Mbit/s. - type: int - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. - type: str - required: false - ip_version: - description: - - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this - parameter is left blank, an IPv4 address will be assigned. - type: int - required: false - ipv4_address: - description: - - Specifies the obtained IPv4 EIP. The system automatically assigns - an EIP if you do not specify it. - type: str - required: false - port_id: - description: - - Specifies the port ID. This parameter is returned only when a - private IP address is bound with the EIP. - type: str - required: false - shared_bandwidth_id: - description: - - Specifies the ID of shared bandwidth. - type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create an eip and bind it to a port -- name: Create vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create subnet - hwc_vpc_subnet: - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: True - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - register: subnet -- name: Create a port - hwc_vpc_port: - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.33" - register: port -- name: Create an eip and bind it to a port - community.general.hwc_vpc_eip: - type: "5_bgp" - dedicated_bandwidth: - charge_mode: "traffic" - name: "ansible_test_dedicated_bandwidth" - size: 1 - port_id: "{{ port.id }}" -''' - -RETURN = ''' - type: - description: - - Specifies the EIP type. - type: str - returned: success - dedicated_bandwidth: - description: - - Specifies the dedicated bandwidth object. 
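        # The API models this as bandwidth share_type PER (dedicated), as
        # opposed to WHOLE (shared); see flatten_dedicated_bandwidth()
        # further down.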
-        type: dict
-        returned: success
-        contains:
-            charge_mode:
-                description:
-                    - Specifies whether the bandwidth is billed by traffic or
-                      by bandwidth size. The value can be bandwidth or traffic.
-                      If this parameter is left blank or is null character
-                      string, default value bandwidth is used. For IPv6
-                      addresses, the default parameter value is bandwidth
-                      outside China and is traffic in China.
-                type: str
-                returned: success
-            name:
-                description:
-                    - Specifies the bandwidth name. The value is a string of 1
-                      to 64 characters that can contain letters, digits,
-                      underscores C(_), hyphens (-), and periods (.).
-                type: str
-                returned: success
-            size:
-                description:
-                    - Specifies the bandwidth size. The value ranges from 1
-                      Mbit/s to 2000 Mbit/s by default. (The specific range may
-                      vary depending on the configuration in each region. You
-                      can see the bandwidth range of each region on the
-                      management console.) The minimum unit for bandwidth
-                      adjustment varies depending on the bandwidth range. The
-                      details are as follows:
-                    - The minimum unit is 1 Mbit/s if the allowed bandwidth
-                      size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
-                      included).
-                    - The minimum unit is 50 Mbit/s if the allowed bandwidth
-                      size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000
-                      Mbit/s included).
-                    - The minimum unit is 500 Mbit/s if the allowed bandwidth
-                      size is greater than 1000 Mbit/s.
-                type: int
-                returned: success
-            id:
-                description:
-                    - Specifies the ID of dedicated bandwidth.
-                type: str
-                returned: success
-    enterprise_project_id:
-        description:
-            - Specifies the enterprise project ID.
-        type: str
-        returned: success
-    ip_version:
-        description:
-            - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
-              parameter is left blank, an IPv4 address will be assigned.
-        type: int
-        returned: success
-    ipv4_address:
-        description:
-            - Specifies the obtained IPv4 EIP. The system automatically assigns
-              an EIP if you do not specify it.
-        type: str
-        returned: success
-    port_id:
-        description:
-            - Specifies the port ID. This parameter is returned only when a
-              private IP address is bound with the EIP.
-        type: str
-        returned: success
-    shared_bandwidth_id:
-        description:
-            - Specifies the ID of shared bandwidth.
-        type: str
-        returned: success
-    create_time:
-        description:
-            - Specifies the time (UTC time) when the EIP was assigned.
-        type: str
-        returned: success
-    ipv6_address:
-        description:
-            - Specifies the obtained IPv6 EIP.
-        type: str
-        returned: success
-    private_ip_address:
-        description:
-            - Specifies the private IP address bound with the EIP. This
-              parameter is returned only when a private IP address is bound
-              with the EIP.
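        # Output-only: populated by the service once the EIP is bound to a
        # port, and never part of the request body.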
- type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='5m', type='str'), - update=dict(default='5m', type='str'), - ), default=dict()), - type=dict(type='str', required=True), - dedicated_bandwidth=dict(type='dict', options=dict( - charge_mode=dict(type='str', required=True), - name=dict(type='str', required=True), - size=dict(type='int', required=True) - )), - enterprise_project_id=dict(type='str'), - ip_version=dict(type='int'), - ipv4_address=dict(type='str'), - port_id=dict(type='str'), - shared_bandwidth_id=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - if not module.check_mode: - update(config) - changed = True - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "dedicated_bandwidth": module.params.get("dedicated_bandwidth"), - "enterprise_project_id": module.params.get("enterprise_project_id"), - "ip_version": module.params.get("ip_version"), - "ipv4_address": module.params.get("ipv4_address"), - "port_id": module.params.get("port_id"), - "shared_bandwidth_id": module.params.get("shared_bandwidth_id"), - "type": module.params.get("type"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["publicip", "id"]) - - -def update(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_update_parameters(opts) - if params: - r = send_update_request(module, params, client) - async_wait_update(config, r, client, timeout) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - if module.params["port_id"]: - module.params["port_id"] = "" - update(config) - - 
send_delete_request(module, None, client) - - url = build_path(module, "publicips/{id}") - - def _refresh_status(): - try: - client.get(url) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, "Pending" - - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - try: - wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_eip): error " - "waiting for api(delete) to " - "be done, error= %s" % str(ex)) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["ip_version"]) - if v: - query_params.append("ip_version=" + str(v)) - - v = navigate_value(opts, ["enterprise_project_id"]) - if v: - query_params.append("enterprise_project_id=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "publicips" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = expand_create_bandwidth(opts, None) - if not is_empty_value(v): - params["bandwidth"] = v - - v = navigate_value(opts, ["enterprise_project_id"], None) - if not is_empty_value(v): - params["enterprise_project_id"] = v - - v = expand_create_publicip(opts, None) - if not is_empty_value(v): - params["publicip"] = v - - return params - - -def expand_create_bandwidth(d, array_index): - v = navigate_value(d, ["dedicated_bandwidth"], array_index) - sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) - if v and sbwid: - raise Exception("don't input shared_bandwidth_id and " - "dedicated_bandwidth at same time") - - if not (v or sbwid): - raise Exception("must input shared_bandwidth_id or " - "dedicated_bandwidth") - - if sbwid: - return { - "id": sbwid, - "share_type": "WHOLE"} - - return { - "charge_mode": v["charge_mode"], - "name": v["name"], - "share_type": "PER", - "size": v["size"]} - - -def expand_create_publicip(d, array_index): - r = dict() - - v = navigate_value(d, ["ipv4_address"], array_index) - if not is_empty_value(v): - r["ip_address"] = v - - v = navigate_value(d, ["ip_version"], array_index) - if not is_empty_value(v): - r["ip_version"] = v - - v = navigate_value(d, ["type"], array_index) - if not is_empty_value(v): - r["type"] = v - - return r - - -def send_create_request(module, params, client): - url = "publicips" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_create(config, 
result, client, timeout): - module = config.module - - path_parameters = { - "publicip_id": ["publicip", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "publicips/{publicip_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["publicip", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - None, - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_eip): error " - "waiting for api(create) to " - "be done, error= %s" % str(ex)) - - -def build_update_parameters(opts): - params = dict() - - v = navigate_value(opts, ["ip_version"], None) - if not is_empty_value(v): - params["ip_version"] = v - - v = navigate_value(opts, ["port_id"], None) - if v is not None: - params["port_id"] = v - - if not params: - return params - - params = {"publicip": params} - - return params - - -def send_update_request(module, params, client): - url = build_path(module, "publicips/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_update(config, result, client, timeout): - module = config.module - - url = build_path(module, "publicips/{id}") - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["publicip", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - None, - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_eip): error " - "waiting for api(update) to " - "be done, error= %s" % str(ex)) - - -def send_delete_request(module, params, client): - url = build_path(module, "publicips/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "publicips/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["publicip"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["bandwidth_id"] = body.get("bandwidth_id") - - result["bandwidth_name"] = body.get("bandwidth_name") - - result["bandwidth_share_type"] = body.get("bandwidth_share_type") - - result["bandwidth_size"] = body.get("bandwidth_size") - - result["create_time"] = body.get("create_time") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - result["ip_version"] = body.get("ip_version") - - result["port_id"] = body.get("port_id") - - result["private_ip_address"] = body.get("private_ip_address") - - result["public_ip_address"] = body.get("public_ip_address") - - result["public_ipv6_address"] = body.get("public_ipv6_address") - - result["status"] = body.get("status") - - result["tenant_id"] = body.get("tenant_id") - - result["type"] = body.get("type") - - return result - - -def update_properties(module, 
response, array_index, exclude_output=False): - r = user_input_parameters(module) - - if not exclude_output: - v = navigate_value(response, ["read", "create_time"], array_index) - r["create_time"] = v - - v = r.get("dedicated_bandwidth") - v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output) - r["dedicated_bandwidth"] = v - - v = navigate_value(response, ["read", "enterprise_project_id"], - array_index) - r["enterprise_project_id"] = v - - v = navigate_value(response, ["read", "ip_version"], array_index) - r["ip_version"] = v - - v = navigate_value(response, ["read", "public_ip_address"], array_index) - r["ipv4_address"] = v - - if not exclude_output: - v = navigate_value(response, ["read", "public_ipv6_address"], - array_index) - r["ipv6_address"] = v - - v = navigate_value(response, ["read", "port_id"], array_index) - r["port_id"] = v - - if not exclude_output: - v = navigate_value(response, ["read", "private_ip_address"], - array_index) - r["private_ip_address"] = v - - v = r.get("shared_bandwidth_id") - v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output) - r["shared_bandwidth_id"] = v - - v = navigate_value(response, ["read", "type"], array_index) - r["type"] = v - - return r - - -def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output): - v = navigate_value(d, ["read", "bandwidth_share_type"], array_index) - if not (v and v == "PER"): - return current_value - - result = current_value - if not result: - result = dict() - - if not exclude_output: - v = navigate_value(d, ["read", "bandwidth_id"], array_index) - if v is not None: - result["id"] = v - - v = navigate_value(d, ["read", "bandwidth_name"], array_index) - if v is not None: - result["name"] = v - - v = navigate_value(d, ["read", "bandwidth_size"], array_index) - if v is not None: - result["size"] = v - - return result if result else current_value - - -def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output): - v = navigate_value(d, ["read", "bandwidth_id"], array_index) - - v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index) - - return v if (v1 and v1 == "WHOLE") else current_value - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_eip): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["publicips"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = expand_list_bandwidth_id(all_opts, None) - result["bandwidth_id"] = v - - v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None) - result["bandwidth_name"] = v - - result["bandwidth_share_type"] = None - - v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None) - result["bandwidth_size"] = v - - result["create_time"] = None - - v = navigate_value(all_opts, ["enterprise_project_id"], None) - result["enterprise_project_id"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["ip_version"], None) - result["ip_version"] = v - - v = navigate_value(all_opts, ["port_id"], None) - result["port_id"] = v - - result["private_ip_address"] = None - - v = navigate_value(all_opts, ["ipv4_address"], None) - result["public_ip_address"] = v - - result["public_ipv6_address"] = None - - result["status"] = None - - result["tenant_id"] = None - - v = navigate_value(all_opts, ["type"], None) - result["type"] = v - - return result - - -def expand_list_bandwidth_id(d, array_index): - v = 
navigate_value(d, ["dedicated_bandwidth"], array_index) - sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) - if v and sbwid: - raise Exception("don't input shared_bandwidth_id and " - "dedicated_bandwidth at same time") - - return sbwid - - -def fill_list_resp_body(body): - result = dict() - - result["bandwidth_id"] = body.get("bandwidth_id") - - result["bandwidth_name"] = body.get("bandwidth_name") - - result["bandwidth_share_type"] = body.get("bandwidth_share_type") - - result["bandwidth_size"] = body.get("bandwidth_size") - - result["create_time"] = body.get("create_time") - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - result["ip_version"] = body.get("ip_version") - - result["port_id"] = body.get("port_id") - - result["private_ip_address"] = body.get("private_ip_address") - - result["public_ip_address"] = body.get("public_ip_address") - - result["public_ipv6_address"] = body.get("public_ipv6_address") - - result["status"] = body.get("status") - - result["tenant_id"] = body.get("tenant_id") - - result["type"] = body.get("type") - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py deleted file mode 100644 index a4d5921b..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py +++ /dev/null @@ -1,691 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_peering_connect -description: - - vpc peering management. -short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - required: true - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - required: true - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - required: true - suboptions: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - required: true - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - required: false - description: - description: - - The description of vpc peering connection. 
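        # For the peering_vpc option above: when the peer VPC belongs to a
        # different project, set peering_vpc.project_id; the module forwards
        # it to the API as tenant_id (see expand_create_accept_vpc_info()
        # below).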
- type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a peering connect -- name: Create a local vpc - hwc_network_vpc: - cidr: "192.168.0.0/16" - name: "ansible_network_vpc_test_local" - register: vpc1 -- name: Create a peering vpc - hwc_network_vpc: - cidr: "192.168.0.0/16" - name: "ansible_network_vpc_test_peering" - register: vpc2 -- name: Create a peering connect - community.general.hwc_vpc_peering_connect: - local_vpc_id: "{{ vpc1.id }}" - name: "ansible_network_peering_test" - peering_vpc: - vpc_id: "{{ vpc2.id }}" -''' - -RETURN = ''' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - returned: success - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - returned: success - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - returned: success - contains: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - returned: success - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - returned: success - description: - description: - - The description of vpc peering connection. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - ), default=dict()), - local_vpc_id=dict(type='str', required=True), - name=dict(type='str', required=True), - peering_vpc=dict(type='dict', required=True, options=dict( - vpc_id=dict(type='str', required=True), - project_id=dict(type='str') - )), - description=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - if not module.check_mode: - update(config) - changed = True - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "description": module.params.get("description"), - "local_vpc_id": module.params.get("local_vpc_id"), - "name": module.params.get("name"), - "peering_vpc": module.params.get("peering_vpc"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), 
"network", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["peering", "id"]) - - -def update(config): - module = config.module - client = config.client(get_region(module), "network", "project") - opts = user_input_parameters(module) - - params = build_update_parameters(opts) - if params: - send_update_request(module, params, client) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "network", "project") - - send_delete_request(module, None, client) - - url = build_path(module, "v2.0/vpc/peerings/{id}") - - def _refresh_status(): - try: - client.get(url) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, "Pending" - - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - try: - wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_peering_connect): error " - "waiting for api(delete) to " - "be done, error= %s" % str(ex)) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "network", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["local_vpc_id"]) - if v: - query_params.append("vpc_id=" + str(v)) - - v = navigate_value(opts, ["name"]) - if v: - query_params.append("name=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "network", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "v2.0/vpc/peerings" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = expand_create_accept_vpc_info(opts, None) - if not is_empty_value(v): - params["accept_vpc_info"] = v - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = expand_create_request_vpc_info(opts, None) - if not is_empty_value(v): - params["request_vpc_info"] = v - - if not params: - return params - - params = {"peering": params} - - return params - - -def expand_create_accept_vpc_info(d, array_index): - r = dict() - - v = navigate_value(d, ["peering_vpc", "project_id"], array_index) - if not is_empty_value(v): - r["tenant_id"] = v - - v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) - if not is_empty_value(v): - r["vpc_id"] = v - - return r - - -def expand_create_request_vpc_info(d, array_index): - r = 
dict() - - r["tenant_id"] = "" - - v = navigate_value(d, ["local_vpc_id"], array_index) - if not is_empty_value(v): - r["vpc_id"] = v - - return r - - -def send_create_request(module, params, client): - url = "v2.0/vpc/peerings" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_create(config, result, client, timeout): - module = config.module - - path_parameters = { - "peering_id": ["peering", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["peering", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE"], - ["PENDING_ACCEPTANCE"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_peering_connect): error " - "waiting for api(create) to " - "be done, error= %s" % str(ex)) - - -def build_update_parameters(opts): - params = dict() - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - if not params: - return params - - params = {"peering": params} - - return params - - -def send_update_request(module, params, client): - url = build_path(module, "v2.0/vpc/peerings/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "v2.0/vpc/peerings/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "v2.0/vpc/peerings/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["peering"], None) - - -def fill_read_resp_body(body): - result = dict() - - v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info")) - result["accept_vpc_info"] = v - - result["description"] = body.get("description") - - result["id"] = body.get("id") - - result["name"] = body.get("name") - - v = fill_read_resp_request_vpc_info(body.get("request_vpc_info")) - result["request_vpc_info"] = v - - result["status"] = body.get("status") - - return result - - -def fill_read_resp_accept_vpc_info(value): - if not value: - return None - - result = dict() - - result["tenant_id"] = value.get("tenant_id") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def fill_read_resp_request_vpc_info(value): - if not value: - return None - - result = dict() - - result["tenant_id"] = value.get("tenant_id") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - 
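    # update_properties() overlays the values read back from the API onto
    # the user-supplied parameters so the result can be compared against the
    # input with are_different_dicts(). A sketch of the mapping performed
    # below (values hypothetical):
    #   response['read'] = {'description': 'd', 'name': 'n',
    #                       'request_vpc_info': {'vpc_id': 'local-id'},
    #                       'accept_vpc_info': {'vpc_id': 'peer-id',
    #                                           'tenant_id': 'project-id'}}
    #   -> {'description': 'd', 'local_vpc_id': 'local-id', 'name': 'n',
    #       'peering_vpc': {'vpc_id': 'peer-id', 'project_id': 'project-id'}}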
- v = navigate_value(response, ["read", "description"], array_index) - r["description"] = v - - v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"], - array_index) - r["local_vpc_id"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = r.get("peering_vpc") - v = flatten_peering_vpc(response, array_index, v, exclude_output) - r["peering_vpc"] = v - - return r - - -def flatten_peering_vpc(d, array_index, current_value, exclude_output): - result = current_value - has_init_value = True - if not result: - result = dict() - has_init_value = False - - v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"], - array_index) - result["project_id"] = v - - v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index) - result["vpc_id"] = v - - if has_init_value: - return result - - for v in result.values(): - if v is not None: - return result - return current_value - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_peering_connect): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["peerings"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = expand_list_accept_vpc_info(all_opts, None) - result["accept_vpc_info"] = v - - v = navigate_value(all_opts, ["description"], None) - result["description"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - v = expand_list_request_vpc_info(all_opts, None) - result["request_vpc_info"] = v - - result["status"] = None - - return result - - -def expand_list_accept_vpc_info(d, array_index): - r = dict() - - v = navigate_value(d, ["peering_vpc", "project_id"], array_index) - r["tenant_id"] = v - - v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) - r["vpc_id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def expand_list_request_vpc_info(d, array_index): - r = dict() - - r["tenant_id"] = None - - v = navigate_value(d, ["local_vpc_id"], array_index) - r["vpc_id"] = v - - for v in r.values(): - if v is not None: - return r - return None - - -def fill_list_resp_body(body): - result = dict() - - v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info")) - result["accept_vpc_info"] = v - - result["description"] = body.get("description") - - result["id"] = body.get("id") - - result["name"] = body.get("name") - - v = fill_list_resp_request_vpc_info(body.get("request_vpc_info")) - result["request_vpc_info"] = v - - result["status"] = body.get("status") - - return result - - -def fill_list_resp_accept_vpc_info(value): - if not value: - return None - - result = dict() - - result["tenant_id"] = value.get("tenant_id") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -def fill_list_resp_request_vpc_info(value): - if not value: - return None - - result = dict() - - result["tenant_id"] = value.get("tenant_id") - - result["vpc_id"] = value.get("vpc_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py deleted file mode 100644 index cf0718f5..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py +++ /dev/null @@ -1,1160 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# 
Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_port -description: - - vpc port management. -short_description: Creates a resource of Vpc/Port in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - required: true - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - required: false - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - required: false - type: list - elements: dict - suboptions: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. - Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - required: false - mac_address: - description: - - Specifies the MAC address. - type: str - required: false - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - elements: dict - required: false - suboptions: - name: - description: - - Specifies the option name. - type: str - required: false - value: - description: - - Specifies the option value. - type: str - required: false - ip_address: - description: - - Specifies the port IP address. - type: str - required: false - name: - description: - - Specifies the port name. The value can contain no more than 255 - characters. - type: str - required: false - security_groups: - description: - - Specifies the ID of the security group. - type: list - elements: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a port -- name: Create vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create subnet - hwc_vpc_subnet: - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: True - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - register: subnet -- name: Create a port - community.general.hwc_vpc_port: - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.33" -''' - -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - returned: success - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - returned: success - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. 
- Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - returned: success - mac_address: - description: - - Specifies the MAC address. - type: str - returned: success - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - returned: success - contains: - name: - description: - - Specifies the option name. - type: str - returned: success - value: - description: - - Specifies the option value. - type: str - returned: success - ip_address: - description: - - Specifies the port IP address. - type: str - returned: success - name: - description: - - Specifies the port name. The value can contain no more than 255 - characters. - type: str - returned: success - security_groups: - description: - - Specifies the ID of the security group. - type: list - returned: success - mac_address: - description: - - Specifies the port MAC address. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - ), default=dict()), - subnet_id=dict(type='str', required=True), - admin_state_up=dict(type='bool'), - allowed_address_pairs=dict( - type='list', elements='dict', - options=dict( - ip_address=dict(type='str'), - mac_address=dict(type='str') - ), - ), - extra_dhcp_opts=dict(type='list', elements='dict', options=dict( - name=dict(type='str'), - value=dict(type='str') - )), - ip_address=dict(type='str'), - name=dict(type='str'), - security_groups=dict(type='list', elements='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - if not module.check_mode: - update(config) - changed = True - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "admin_state_up": module.params.get("admin_state_up"), - "allowed_address_pairs": module.params.get("allowed_address_pairs"), - "extra_dhcp_opts": module.params.get("extra_dhcp_opts"), - "ip_address": module.params.get("ip_address"), - "name": module.params.get("name"), - "security_groups": module.params.get("security_groups"), - "subnet_id": module.params.get("subnet_id"), - } - - -def 
create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["port", "id"]) - - -def update(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - - params = build_update_parameters(opts) - if params: - send_update_request(module, params, client) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - url = build_path(module, "ports/{id}") - - def _refresh_status(): - try: - client.get(url) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, "Pending" - - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - try: - wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_port): error " - "waiting for api(delete) to " - "be done, error= %s" % str(ex)) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - array_index = { - "read.fixed_ips": 0, - } - - return update_properties(module, res, array_index, exclude_output) - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["subnet_id"]) - if v: - query_params.append("network_id=" + str(v)) - - v = navigate_value(opts, ["name"]) - if v: - query_params.append("name=" + str(v)) - - v = navigate_value(opts, ["admin_state_up"]) - if v: - query_params.append("admin_state_up=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "ports" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["admin_state_up"], None) - if not is_empty_value(v): - params["admin_state_up"] = v - - v = expand_create_allowed_address_pairs(opts, None) - if not is_empty_value(v): - params["allowed_address_pairs"] = v - - v = expand_create_extra_dhcp_opts(opts, None) - if not is_empty_value(v): - params["extra_dhcp_opts"] = v - - v = expand_create_fixed_ips(opts, None) - if not is_empty_value(v): - params["fixed_ips"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = navigate_value(opts, ["subnet_id"], None) - if not is_empty_value(v): - params["network_id"] = v - - v = navigate_value(opts, ["security_groups"], None) 
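    # Note the renames in this function: the module's subnet_id is sent to
    # the API as network_id, and ip_address travels inside fixed_ips; the
    # request body is wrapped as {"port": {...}} just below.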
- if not is_empty_value(v): - params["security_groups"] = v - - if not params: - return params - - params = {"port": params} - - return params - - -def expand_create_allowed_address_pairs(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["allowed_address_pairs"], - new_array_index) - if not v: - return req - n = len(v) - for i in range(n): - new_array_index["allowed_address_pairs"] = i - transformed = dict() - - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) - if not is_empty_value(v): - transformed["ip_address"] = v - - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) - if not is_empty_value(v): - transformed["mac_address"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_create_extra_dhcp_opts(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) - if not v: - return req - n = len(v) - for i in range(n): - new_array_index["extra_dhcp_opts"] = i - transformed = dict() - - v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) - if not is_empty_value(v): - transformed["opt_name"] = v - - v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) - if not is_empty_value(v): - transformed["opt_value"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_create_fixed_ips(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - n = 1 - for i in range(n): - transformed = dict() - - v = navigate_value(d, ["ip_address"], new_array_index) - if not is_empty_value(v): - transformed["ip_address"] = v - - if transformed: - req.append(transformed) - - return req - - -def send_create_request(module, params, client): - url = "ports" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_create(config, result, client, timeout): - module = config.module - - path_parameters = { - "port_id": ["port", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "ports/{port_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["port", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - ["BUILD"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_port): error " - "waiting for api(create) to " - "be done, error= %s" % str(ex)) - - -def build_update_parameters(opts): - params = dict() - - v = expand_update_allowed_address_pairs(opts, None) - if v is not None: - params["allowed_address_pairs"] = v - - v = expand_update_extra_dhcp_opts(opts, None) - if v is not None: - params["extra_dhcp_opts"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = navigate_value(opts, ["security_groups"], None) - if not is_empty_value(v): - params["security_groups"] = v - - if not params: - return params - - params = {"port": params} - - return params - - -def expand_update_allowed_address_pairs(d, 
array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["allowed_address_pairs"], - new_array_index) - if not v: - return req - n = len(v) - for i in range(n): - new_array_index["allowed_address_pairs"] = i - transformed = dict() - - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) - if not is_empty_value(v): - transformed["ip_address"] = v - - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) - if not is_empty_value(v): - transformed["mac_address"] = v - - if transformed: - req.append(transformed) - - return req - - -def expand_update_extra_dhcp_opts(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) - if not v: - return req - n = len(v) - for i in range(n): - new_array_index["extra_dhcp_opts"] = i - transformed = dict() - - v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) - if not is_empty_value(v): - transformed["opt_name"] = v - - v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) - if not is_empty_value(v): - transformed["opt_value"] = v - - if transformed: - req.append(transformed) - - return req - - -def send_update_request(module, params, client): - url = build_path(module, "ports/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "ports/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "ports/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["port"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["admin_state_up"] = body.get("admin_state_up") - - v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs")) - result["allowed_address_pairs"] = v - - result["binding_host_id"] = body.get("binding_host_id") - - result["binding_vnic_type"] = body.get("binding_vnic_type") - - result["device_id"] = body.get("device_id") - - result["device_owner"] = body.get("device_owner") - - result["dns_name"] = body.get("dns_name") - - v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) - result["extra_dhcp_opts"] = v - - v = fill_read_resp_fixed_ips(body.get("fixed_ips")) - result["fixed_ips"] = v - - result["id"] = body.get("id") - - result["mac_address"] = body.get("mac_address") - - result["name"] = body.get("name") - - result["network_id"] = body.get("network_id") - - result["security_groups"] = body.get("security_groups") - - result["status"] = body.get("status") - - result["tenant_id"] = body.get("tenant_id") - - return result - - -def fill_read_resp_allowed_address_pairs(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["ip_address"] = item.get("ip_address") - - val["mac_address"] = item.get("mac_address") - - result.append(val) - - return result - - -def 
fill_read_resp_extra_dhcp_opts(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["opt_name"] = item.get("opt_name") - - val["opt_value"] = item.get("opt_value") - - result.append(val) - - return result - - -def fill_read_resp_fixed_ips(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["ip_address"] = item.get("ip_address") - - result.append(val) - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "admin_state_up"], array_index) - r["admin_state_up"] = v - - v = r.get("allowed_address_pairs") - v = flatten_allowed_address_pairs(response, array_index, v, exclude_output) - r["allowed_address_pairs"] = v - - v = r.get("extra_dhcp_opts") - v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output) - r["extra_dhcp_opts"] = v - - v = navigate_value(response, ["read", "fixed_ips", "ip_address"], - array_index) - r["ip_address"] = v - - if not exclude_output: - v = navigate_value(response, ["read", "mac_address"], array_index) - r["mac_address"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = navigate_value(response, ["read", "security_groups"], array_index) - r["security_groups"] = v - - v = navigate_value(response, ["read", "network_id"], array_index) - r["subnet_id"] = v - - return r - - -def flatten_allowed_address_pairs(d, array_index, - current_value, exclude_output): - n = 0 - result = current_value - has_init_value = True - if result: - n = len(result) - else: - has_init_value = False - result = [] - v = navigate_value(d, ["read", "allowed_address_pairs"], - array_index) - if not v: - return current_value - n = len(v) - - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - for i in range(n): - new_array_index["read.allowed_address_pairs"] = i - - val = dict() - if len(result) >= (i + 1) and result[i]: - val = result[i] - - v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"], - new_array_index) - val["ip_address"] = v - - v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"], - new_array_index) - val["mac_address"] = v - - if len(result) >= (i + 1): - result[i] = val - else: - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if (has_init_value or result) else current_value - - -def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output): - n = 0 - result = current_value - has_init_value = True - if result: - n = len(result) - else: - has_init_value = False - result = [] - v = navigate_value(d, ["read", "extra_dhcp_opts"], - array_index) - if not v: - return current_value - n = len(v) - - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - for i in range(n): - new_array_index["read.extra_dhcp_opts"] = i - - val = dict() - if len(result) >= (i + 1) and result[i]: - val = result[i] - - v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"], - new_array_index) - val["name"] = v - - v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"], - new_array_index) - val["value"] = v - - if len(result) >= (i + 1): - result[i] = val - else: - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if (has_init_value or result) else current_value - - -def send_list_request(module, client, url): - - r = None - try: - r = 
client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_port): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["ports"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["admin_state_up"], None) - result["admin_state_up"] = v - - v = expand_list_allowed_address_pairs(all_opts, None) - result["allowed_address_pairs"] = v - - result["binding_host_id"] = None - - result["binding_vnic_type"] = None - - result["device_id"] = None - - result["device_owner"] = None - - result["dns_name"] = None - - v = expand_list_extra_dhcp_opts(all_opts, None) - result["extra_dhcp_opts"] = v - - v = expand_list_fixed_ips(all_opts, None) - result["fixed_ips"] = v - - result["id"] = None - - result["mac_address"] = None - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - v = navigate_value(all_opts, ["subnet_id"], None) - result["network_id"] = v - - v = navigate_value(all_opts, ["security_groups"], None) - result["security_groups"] = v - - result["status"] = None - - result["tenant_id"] = None - - return result - - -def expand_list_allowed_address_pairs(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["allowed_address_pairs"], - new_array_index) - - n = len(v) if v else 1 - for i in range(n): - new_array_index["allowed_address_pairs"] = i - transformed = dict() - - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) - transformed["ip_address"] = v - - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) - transformed["mac_address"] = v - - for v in transformed.values(): - if v is not None: - req.append(transformed) - break - - return req if req else None - - -def expand_list_extra_dhcp_opts(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) - - n = len(v) if v else 1 - for i in range(n): - new_array_index["extra_dhcp_opts"] = i - transformed = dict() - - v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) - transformed["opt_name"] = v - - v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) - transformed["opt_value"] = v - - for v in transformed.values(): - if v is not None: - req.append(transformed) - break - - return req if req else None - - -def expand_list_fixed_ips(d, array_index): - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - req = [] - - n = 1 - for i in range(n): - transformed = dict() - - v = navigate_value(d, ["ip_address"], new_array_index) - transformed["ip_address"] = v - - for v in transformed.values(): - if v is not None: - req.append(transformed) - break - - return req if req else None - - -def fill_list_resp_body(body): - result = dict() - - result["admin_state_up"] = body.get("admin_state_up") - - v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs")) - result["allowed_address_pairs"] = v - - result["binding_host_id"] = body.get("binding_host_id") - - result["binding_vnic_type"] = body.get("binding_vnic_type") - - result["device_id"] = body.get("device_id") - - result["device_owner"] = body.get("device_owner") - - result["dns_name"] = body.get("dns_name") - - v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) - result["extra_dhcp_opts"] = v - - v = 
fill_list_resp_fixed_ips(body.get("fixed_ips")) - result["fixed_ips"] = v - - result["id"] = body.get("id") - - result["mac_address"] = body.get("mac_address") - - result["name"] = body.get("name") - - result["network_id"] = body.get("network_id") - - result["security_groups"] = body.get("security_groups") - - result["status"] = body.get("status") - - result["tenant_id"] = body.get("tenant_id") - - return result - - -def fill_list_resp_allowed_address_pairs(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["ip_address"] = item.get("ip_address") - - val["mac_address"] = item.get("mac_address") - - result.append(val) - - return result - - -def fill_list_resp_extra_dhcp_opts(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["opt_name"] = item.get("opt_name") - - val["opt_value"] = item.get("opt_value") - - result.append(val) - - return result - - -def fill_list_resp_fixed_ips(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["ip_address"] = item.get("ip_address") - - result.append(val) - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py deleted file mode 100644 index 901755f3..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_private_ip -description: - - vpc private ip management. -short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud -notes: - - If I(id) option is provided, it takes precedence over I(subnet_id), I(ip_address) for private ip selection. - - I(subnet_id), I(ip_address) are used for private ip selection. If more than one private ip with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. Cannot be changed after creating the private ip. - type: str - required: true - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. Cannot be changed after - creating the private ip. 
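
# Because no private ip option can be updated, the module guards an existing
# resource by re-reading it and failing when the requested options differ.
# A compact sketch of that guard; it assumes, as the module's usage of
# are_different_dicts suggests, that unspecified options (None) are ignored
# (ensure_unchanged is an illustrative name, not part of the module):
def ensure_unchanged(current, desired):
    """Fail instead of updating: the API cannot modify a private ip in place."""
    conflicts = {k: v for k, v in desired.items()
                 if v is not None and current.get(k) != v}
    if conflicts:
        raise Exception("Cannot change option from (%s) to (%s) of an"
                        " existing resource." % (current, desired))
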
- type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a private ip -- name: Create vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create subnet - hwc_vpc_subnet: - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: True - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - register: subnet -- name: Create a private ip - community.general.hwc_vpc_private_ip: - subnet_id: "{{ subnet.id }}" - ip_address: "192.168.100.33" -''' - -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. - type: str - returned: success - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - subnet_id=dict(type='str', required=True), - ip_address=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - raise Exception( - "Cannot change option from (%s) to (%s)of an" - " existing resource.(%s)" % (current, expect, module.params.get('id'))) - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "ip_address": module.params.get("ip_address"), - "subnet_id": module.params.get("subnet_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["privateips", "id"], - {"privateips": 0}) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def 
_build_query_link(opts): - query_link = "?marker={marker}&limit=10" - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = build_path(module, "subnets/{subnet_id}/privateips") + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["ip_address"], None) - if not is_empty_value(v): - params["ip_address"] = v - - v = navigate_value(opts, ["subnet_id"], None) - if not is_empty_value(v): - params["subnet_id"] = v - - if not params: - return params - - params = {"privateips": [params]} - - return params - - -def send_create_request(module, params, client): - url = "privateips" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_private_ip): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "privateips/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_private_ip): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "privateips/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_private_ip): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["privateip"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["id"] = body.get("id") - - result["ip_address"] = body.get("ip_address") - - result["subnet_id"] = body.get("subnet_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "ip_address"], array_index) - r["ip_address"] = v - - v = navigate_value(response, ["read", "subnet_id"], array_index) - r["subnet_id"] = v - - return r - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_private_ip): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["privateips"], None) - - -def _build_identity_object(all_opts): - result = dict() - - result["id"] = None - - v = navigate_value(all_opts, ["ip_address"], None) - result["ip_address"] = v - - v = navigate_value(all_opts, ["subnet_id"], None) - result["subnet_id"] = v - - return result - - -def fill_list_resp_body(body): - result = dict() - - result["id"] = body.get("id") - - result["ip_address"] = body.get("ip_address") - - result["subnet_id"] = body.get("subnet_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py 
deleted file mode 100644 index 31829dc6..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py +++ /dev/null @@ -1,437 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_route -description: - - vpc route management. -short_description: Creates a resource of Vpc/Route in Huawei Cloud -notes: - - If I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection. - - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - required: true - next_hop: - description: - - Specifies the next hop. The value is VPC peering connection ID. - type: str - required: true - vpc_id: - description: - - Specifies the VPC ID to which route is added. - type: str - required: true - type: - description: - - Specifies the type of route. - type: str - required: false - default: 'peering' -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a peering connect -- name: Create a local vpc - hwc_network_vpc: - cidr: "192.168.0.0/16" - name: "ansible_network_vpc_test_local" - register: vpc1 -- name: Create a peering vpc - hwc_network_vpc: - cidr: "192.168.0.0/16" - name: "ansible_network_vpc_test_peering" - register: vpc2 -- name: Create a peering connect - hwc_vpc_peering_connect: - local_vpc_id: "{{ vpc1.id }}" - name: "ansible_network_peering_test" - filters: - - "name" - peering_vpc: - vpc_id: "{{ vpc2.id }}" - register: connect -- name: Create a route - community.general.hwc_vpc_route: - vpc_id: "{{ vpc1.id }}" - destination: "192.168.0.0/16" - next_hop: "{{ connect.id }}" -''' - -RETURN = ''' - id: - description: - - UUID of the route. - type: str - returned: success - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - returned: success - next_hop: - description: - - Specifies the next hop. The value is VPC peering connection ID. - type: str - returned: success - vpc_id: - description: - - Specifies the VPC ID to which route is added. - type: str - returned: success - type: - description: - - Specifies the type of route. 
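
# Route selection builds an "identity object" from the user's options
# (unconstrained fields stay None) and keeps only listed routes that agree
# with it. A small sketch of that filter; the None-as-wildcard behaviour is
# assumed from how are_different_dicts is called (match_identity is an
# illustrative name, not part of the module):
def match_identity(identity, candidates):
    """Return candidates whose constrained fields all equal the identity's."""
    def matches(item):
        return all(v is None or item.get(k) == v for k, v in identity.items())
    return [item for item in candidates if matches(item)]

# e.g. match_identity({"vpc_id": "abc", "type": "peering", "id": None}, routes)
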
- type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - destination=dict(type='str', required=True), - next_hop=dict(type='str', required=True), - vpc_id=dict(type='str', required=True), - type=dict(type='str', default='peering'), - id=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params.get("id"): - resource = get_resource_by_id(config) - if module.params['state'] == 'present': - opts = user_input_parameters(module) - if are_different_dicts(resource, opts): - raise Exception( - "Cannot change option from (%s) to (%s) for an" - " existing route.(%s)" % (resource, opts, - config.module.params.get( - 'id'))) - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = update_properties(module, {"read": v[0]}, None) - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - resource = create(config) - changed = True - - result = resource - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "destination": module.params.get("destination"), - "next_hop": module.params.get("next_hop"), - "type": module.params.get("type"), - "vpc_id": module.params.get("vpc_id"), - "id": module.params.get("id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "network", "project") - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["route", "id"]) - - result = update_properties(module, {"read": fill_resp_body(r)}, None) - return result - - -def delete(config): - module = config.module - client = config.client(get_region(module), "network", "project") - - send_delete_request(module, None, client) - - -def get_resource_by_id(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "network", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_resp_body(r) - - result = update_properties(module, res, None, exclude_output) - return result - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["type"]) - if v: - query_params.append("type=" + str(v)) - - v = navigate_value(opts, ["destination"]) - if v: - query_params.append("destination=" + str(v)) - - v = navigate_value(opts, ["vpc_id"]) - if v: - query_params.append("vpc_id=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "network", "project") - opts = 
user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "v2.0/vpc/routes" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["destination"], None) - if not is_empty_value(v): - params["destination"] = v - - v = navigate_value(opts, ["next_hop"], None) - if not is_empty_value(v): - params["nexthop"] = v - - v = navigate_value(opts, ["type"], None) - if not is_empty_value(v): - params["type"] = v - - v = navigate_value(opts, ["vpc_id"], None) - if not is_empty_value(v): - params["vpc_id"] = v - - if not params: - return params - - params = {"route": params} - - return params - - -def send_create_request(module, params, client): - url = "v2.0/vpc/routes" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_route): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "v2.0/vpc/routes/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_route): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "v2.0/vpc/routes/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_route): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["route"], None) - - -def fill_resp_body(body): - result = dict() - - result["destination"] = body.get("destination") - - result["id"] = body.get("id") - - result["nexthop"] = body.get("nexthop") - - result["type"] = body.get("type") - - result["vpc_id"] = body.get("vpc_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "destination"], array_index) - r["destination"] = v - - v = navigate_value(response, ["read", "nexthop"], array_index) - r["next_hop"] = v - - v = navigate_value(response, ["read", "type"], array_index) - r["type"] = v - - v = navigate_value(response, ["read", "vpc_id"], array_index) - r["vpc_id"] = v - - v = navigate_value(response, ["read", "id"], array_index) - r["id"] = v - - return r - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_route): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["routes"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["destination"], None) - result["destination"] = v - - v = navigate_value(all_opts, ["id"], None) - result["id"] = v - - v = navigate_value(all_opts, ["next_hop"], None) - result["nexthop"] = v - - v = navigate_value(all_opts, ["type"], None) - result["type"] = v - - v = navigate_value(all_opts, ["vpc_id"], None) - result["vpc_id"] = v - - return result - - -def 
fill_list_resp_body(body): - result = dict() - - result["destination"] = body.get("destination") - - result["id"] = body.get("id") - - result["nexthop"] = body.get("nexthop") - - result["type"] = body.get("type") - - result["vpc_id"] = body.get("vpc_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py deleted file mode 100644 index 5a1dfe70..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py +++ /dev/null @@ -1,644 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_security_group -description: - - vpc security group management. -short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud -notes: - - If I(id) option is provided, it takes precedence over I(name), - I(enterprise_project_id) and I(vpc_id) for security group selection. - - I(name), I(enterprise_project_id) and I(vpc_id) are used for security - group selection. If more than one security group with this options exists, - execution is aborted. - - No parameter support updating. If one of option is changed, the module - will create a new resource. -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group.s - type: str - required: false - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a security group -- name: Create a security group - community.general.hwc_vpc_security_group: - name: "ansible_network_security_group_test" -''' - -RETURN = ''' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group. - type: str - returned: success - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - returned: success - rules: - description: - - Specifies the security group rule, which ensures that resources - in the security group can communicate with one another. 
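
# The rules above arrive nested under "security_group_rules" in the API
# response, and the module copies a fixed set of keys out of each entry.
# A compact sketch of that projection (project_rules and RULE_KEYS are
# illustrative names for what fill_read_resp_security_group_rules does):
RULE_KEYS = ("description", "direction", "ethertype", "id",
             "port_range_max", "port_range_min", "protocol",
             "remote_address_group_id", "remote_group_id",
             "remote_ip_prefix", "security_group_id")

def project_rules(security_group_rules):
    """Keep only the documented fields of each rule; None when there are none."""
    if not security_group_rules:
        return None
    return [{k: item.get(k) for k in RULE_KEYS}
            for item in security_group_rules]
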
- type: complex - returned: success - contains: - description: - description: - - Provides supplementary information about the security - group rule. - type: str - returned: success - direction: - description: - - Specifies the direction of access control. The value can - be egress or ingress. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 - or IPv6. - type: str - returned: success - id: - description: - - Specifies the security group rule ID. - type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to - 65535. If the protocol is not icmp, the value cannot be - smaller than the port_range_min value. An empty value - indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 - to 65535. The value cannot be greater than the - port_range_max value. An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, - udp, or others. If the parameter is left blank, the - security group supports all protocols. - type: str - returned: success - remote_address_group_id: - description: - - Specifies the ID of remote IP address group. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control - direction is set to egress, the parameter specifies the - source IP address. If the access control direction is set - to ingress, the parameter specifies the destination IP - address. 
- type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - name=dict(type='str', required=True), - enterprise_project_id=dict(type='str'), - vpc_id=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params.get("id"): - resource = read_resource(config) - if module.params['state'] == 'present': - check_resource_option(resource, module) - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = update_properties(module, {"read": v[0]}, None) - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - resource = create(config) - changed = True - - result = resource - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "enterprise_project_id": module.params.get("enterprise_project_id"), - "name": module.params.get("name"), - "vpc_id": module.params.get("vpc_id"), - "id": module.params.get("id"), - } - - -def check_resource_option(resource, module): - opts = user_input_parameters(module) - - resource = { - "enterprise_project_id": resource.get("enterprise_project_id"), - "name": resource.get("name"), - "vpc_id": resource.get("vpc_id"), - "id": resource.get("id"), - } - - if are_different_dicts(resource, opts): - raise Exception( - "Cannot change option from (%s) to (%s) for an" - " existing security group(%s)." 
% (resource, opts, - module.params.get('id'))) - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["security_group", "id"]) - - result = update_properties(module, {"read": fill_read_resp_body(r)}, None) - return result - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_params = [] - - v = navigate_value(opts, ["enterprise_project_id"]) - if v: - query_params.append("enterprise_project_id=" + str(v)) - - v = navigate_value(opts, ["vpc_id"]) - if v: - query_params.append("vpc_id=" + str(v)) - - query_link = "?marker={marker}&limit=10" - if query_params: - query_link += "&" + "&".join(query_params) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "security-groups" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["enterprise_project_id"], None) - if not is_empty_value(v): - params["enterprise_project_id"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = navigate_value(opts, ["vpc_id"], None) - if not is_empty_value(v): - params["vpc_id"] = v - - if not params: - return params - - params = {"security_group": params} - - return params - - -def send_create_request(module, params, client): - url = "security-groups" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "security-groups/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "security-groups/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["security_group"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - 
result["name"] = body.get("name") - - v = fill_read_resp_security_group_rules(body.get("security_group_rules")) - result["security_group_rules"] = v - - result["vpc_id"] = body.get("vpc_id") - - return result - - -def fill_read_resp_security_group_rules(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["description"] = item.get("description") - - val["direction"] = item.get("direction") - - val["ethertype"] = item.get("ethertype") - - val["id"] = item.get("id") - - val["port_range_max"] = item.get("port_range_max") - - val["port_range_min"] = item.get("port_range_min") - - val["protocol"] = item.get("protocol") - - val["remote_address_group_id"] = item.get("remote_address_group_id") - - val["remote_group_id"] = item.get("remote_group_id") - - val["remote_ip_prefix"] = item.get("remote_ip_prefix") - - val["security_group_id"] = item.get("security_group_id") - - result.append(val) - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "enterprise_project_id"], - array_index) - r["enterprise_project_id"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - if not exclude_output: - v = r.get("rules") - v = flatten_rules(response, array_index, v, exclude_output) - r["rules"] = v - - v = navigate_value(response, ["read", "vpc_id"], array_index) - r["vpc_id"] = v - - v = navigate_value(response, ["read", "id"], array_index) - r["id"] = v - - return r - - -def flatten_rules(d, array_index, current_value, exclude_output): - n = 0 - result = current_value - has_init_value = True - if result: - n = len(result) - else: - has_init_value = False - result = [] - v = navigate_value(d, ["read", "security_group_rules"], - array_index) - if not v: - return current_value - n = len(v) - - new_array_index = dict() - if array_index: - new_array_index.update(array_index) - - for i in range(n): - new_array_index["read.security_group_rules"] = i - - val = dict() - if len(result) >= (i + 1) and result[i]: - val = result[i] - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "description"], - new_array_index) - val["description"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "direction"], - new_array_index) - val["direction"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "ethertype"], - new_array_index) - val["ethertype"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "id"], - new_array_index) - val["id"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "port_range_max"], - new_array_index) - val["port_range_max"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "port_range_min"], - new_array_index) - val["port_range_min"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "protocol"], - new_array_index) - val["protocol"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"], - new_array_index) - val["remote_address_group_id"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"], - new_array_index) - val["remote_group_id"] = v - - if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"], - 
new_array_index) - val["remote_ip_prefix"] = v - - if len(result) >= (i + 1): - result[i] = val - else: - for v in val.values(): - if v is not None: - result.append(val) - break - - return result if (has_init_value or result) else current_value - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["security_groups"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["enterprise_project_id"], None) - result["enterprise_project_id"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - result["security_group_rules"] = None - - v = navigate_value(all_opts, ["vpc_id"], None) - result["vpc_id"] = v - - return result - - -def fill_list_resp_body(body): - result = dict() - - result["enterprise_project_id"] = body.get("enterprise_project_id") - - result["id"] = body.get("id") - - result["name"] = body.get("name") - - v = fill_list_resp_security_group_rules(body.get("security_group_rules")) - result["security_group_rules"] = v - - result["vpc_id"] = body.get("vpc_id") - - return result - - -def fill_list_resp_security_group_rules(value): - if not value: - return None - - result = [] - for item in value: - val = dict() - - val["description"] = item.get("description") - - val["direction"] = item.get("direction") - - val["ethertype"] = item.get("ethertype") - - val["id"] = item.get("id") - - val["port_range_max"] = item.get("port_range_max") - - val["port_range_min"] = item.get("port_range_min") - - val["protocol"] = item.get("protocol") - - val["remote_address_group_id"] = item.get("remote_address_group_id") - - val["remote_group_id"] = item.get("remote_group_id") - - val["remote_ip_prefix"] = item.get("remote_ip_prefix") - - val["security_group_id"] = item.get("security_group_id") - - result.append(val) - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py deleted file mode 100644 index f92c8276..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py +++ /dev/null @@ -1,570 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_security_group_rule -description: - - vpc security group management. -short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud -notes: - - If I(id) option is provided, it takes precedence over - I(enterprise_project_id) for security group rule selection. - - I(security_group_id) is used for security group rule selection. If more - than one security group rule with this options exists, execution is - aborted. - - No parameter support updating. If one of option is changed, the module - will create a new resource. 
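
# Since every option is create-only, request bodies are assembled once, the
# same way across these modules: empty options are dropped and the rest is
# wrapped under the API resource key ("security_group_rule" here). A minimal
# sketch of that pattern; the emptiness test approximates is_empty_value,
# whose exact semantics are assumed (build_body is an illustrative name):
def build_body(opts, resource_key):
    """Drop empty options and nest the remainder under the resource key."""
    def is_empty(v):
        return v is None or v == "" or v == [] or v == {}
    params = {k: v for k, v in opts.items() if not is_empty(v)}
    return {resource_key: params} if params else {}

# build_body({"direction": "ingress", "protocol": None}, "security_group_rule")
# -> {"security_group_rule": {"direction": "ingress"}}
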
-version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - required: true - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - required: true - description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - required: false - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. - type: str - required: false - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - required: false - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - required: false - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - required: false - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - required: false - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create a security group rule -- name: Create a security group - hwc_vpc_security_group: - name: "ansible_network_security_group_test" - register: sg -- name: Create a security group rule - community.general.hwc_vpc_security_group_rule: - direction: "ingress" - protocol: "tcp" - ethertype: "IPv4" - port_range_max: 22 - security_group_id: "{{ sg.id }}" - port_range_min: 22 - remote_ip_prefix: "0.0.0.0/0" -''' - -RETURN = ''' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - returned: success - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - returned: success - description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. 
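
# The port range fields documented here carry two rules: both ends must lie
# in 1..65535 and, unless the protocol is icmp, port_range_min cannot exceed
# port_range_max (an empty value means "all ports"). The API enforces this
# server-side; a client-side pre-check could look like this (illustrative
# only, not part of the module):
def valid_port_range(port_range_min, port_range_max, protocol):
    """Mirror the documented constraints; None means the bound is open."""
    for port in (port_range_min, port_range_max):
        if port is not None and not 1 <= port <= 65535:
            return False
    if protocol != "icmp" and port_range_min is not None \
            and port_range_max is not None:
        return port_range_min <= port_range_max
    return True
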
- type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - direction=dict(type='str', required=True), - security_group_id=dict(type='str', required=True), - description=dict(type='str'), - ethertype=dict(type='str'), - port_range_max=dict(type='int'), - port_range_min=dict(type='int'), - protocol=dict(type='str'), - remote_group_id=dict(type='str'), - remote_ip_prefix=dict(type='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params['id']: - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - raise Exception( - "Cannot change option from (%s) to (%s) for an" - " existing security group(%s)." 
% (current, expect, module.params.get('id'))) - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "description": module.params.get("description"), - "direction": module.params.get("direction"), - "ethertype": module.params.get("ethertype"), - "port_range_max": module.params.get("port_range_max"), - "port_range_min": module.params.get("port_range_min"), - "protocol": module.params.get("protocol"), - "remote_group_id": module.params.get("remote_group_id"), - "remote_ip_prefix": module.params.get("remote_ip_prefix"), - "security_group_id": module.params.get("security_group_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["security_group_rule", "id"]) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_link = "?marker={marker}&limit=10" - v = navigate_value(opts, ["security_group_id"]) - if v: - query_link += "&security_group_id=" + str(v) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "security-group-rules" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["description"], None) - if not is_empty_value(v): - params["description"] = v - - v = navigate_value(opts, ["direction"], None) - if not is_empty_value(v): - params["direction"] = v - - v = navigate_value(opts, ["ethertype"], None) - if not is_empty_value(v): - params["ethertype"] = v - - v = navigate_value(opts, ["port_range_max"], None) - if not is_empty_value(v): - params["port_range_max"] = v - - v = navigate_value(opts, ["port_range_min"], None) - if not is_empty_value(v): - params["port_range_min"] = v - - v = navigate_value(opts, ["protocol"], None) - if not is_empty_value(v): - params["protocol"] = v - - v = navigate_value(opts, ["remote_group_id"], None) - if not is_empty_value(v): - params["remote_group_id"] = v - - v = navigate_value(opts, ["remote_ip_prefix"], None) - if not is_empty_value(v): - params["remote_ip_prefix"] = v - - v = navigate_value(opts, ["security_group_id"], None) - if not is_empty_value(v): - params["security_group_id"] = 
v - - if not params: - return params - - params = {"security_group_rule": params} - - return params - - -def send_create_request(module, params, client): - url = "security-group-rules" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group_rule): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_delete_request(module, params, client): - url = build_path(module, "security-group-rules/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group_rule): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "security-group-rules/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group_rule): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["security_group_rule"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["description"] = body.get("description") - - result["direction"] = body.get("direction") - - result["ethertype"] = body.get("ethertype") - - result["id"] = body.get("id") - - result["port_range_max"] = body.get("port_range_max") - - result["port_range_min"] = body.get("port_range_min") - - result["protocol"] = body.get("protocol") - - result["remote_address_group_id"] = body.get("remote_address_group_id") - - result["remote_group_id"] = body.get("remote_group_id") - - result["remote_ip_prefix"] = body.get("remote_ip_prefix") - - result["security_group_id"] = body.get("security_group_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "description"], array_index) - r["description"] = v - - v = navigate_value(response, ["read", "direction"], array_index) - r["direction"] = v - - v = navigate_value(response, ["read", "ethertype"], array_index) - r["ethertype"] = v - - v = navigate_value(response, ["read", "port_range_max"], array_index) - r["port_range_max"] = v - - v = navigate_value(response, ["read", "port_range_min"], array_index) - r["port_range_min"] = v - - v = navigate_value(response, ["read", "protocol"], array_index) - r["protocol"] = v - - v = navigate_value(response, ["read", "remote_group_id"], array_index) - r["remote_group_id"] = v - - v = navigate_value(response, ["read", "remote_ip_prefix"], array_index) - r["remote_ip_prefix"] = v - - v = navigate_value(response, ["read", "security_group_id"], array_index) - r["security_group_id"] = v - - return r - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_security_group_rule): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["security_group_rules"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["description"], None) - result["description"] = v - - v = navigate_value(all_opts, ["direction"], None) - result["direction"] = v - - v = navigate_value(all_opts, ["ethertype"], None) - result["ethertype"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["port_range_max"], None) - result["port_range_max"] = v - - v = navigate_value(all_opts, 
["port_range_min"], None) - result["port_range_min"] = v - - v = navigate_value(all_opts, ["protocol"], None) - result["protocol"] = v - - result["remote_address_group_id"] = None - - v = navigate_value(all_opts, ["remote_group_id"], None) - result["remote_group_id"] = v - - v = navigate_value(all_opts, ["remote_ip_prefix"], None) - result["remote_ip_prefix"] = v - - v = navigate_value(all_opts, ["security_group_id"], None) - result["security_group_id"] = v - - return result - - -def fill_list_resp_body(body): - result = dict() - - result["description"] = body.get("description") - - result["direction"] = body.get("direction") - - result["ethertype"] = body.get("ethertype") - - result["id"] = body.get("id") - - result["port_range_max"] = body.get("port_range_max") - - result["port_range_min"] = body.get("port_range_min") - - result["protocol"] = body.get("protocol") - - result["remote_address_group_id"] = body.get("remote_address_group_id") - - result["remote_group_id"] = body.get("remote_group_id") - - result["remote_ip_prefix"] = body.get("remote_ip_prefix") - - result["security_group_id"] = body.get("security_group_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py deleted file mode 100644 index ccf18050..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py +++ /dev/null @@ -1,734 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -############################################################################### -# Documentation -############################################################################### - -DOCUMENTATION = ''' ---- -module: hwc_vpc_subnet -description: - - subnet management. -short_description: Creates a resource of Vpc/Subnet in Huawei Cloud -version_added: '0.2.0' -author: Huawei Inc. (@huaweicloud) -requirements: - - keystoneauth1 >= 3.6.0 -options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - update: - description: - - The timeouts for update operation. - type: str - default: '15m' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. Cannot be changed after creating the subnet. - type: str - required: true - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. Cannot be changed after creating the subnet. - type: str - required: true - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - required: true - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. Cannot - be changed after creating the subnet. 
- type: str - required: true - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. Cannot be changed - after creating the subnet. - type: str - required: false - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - required: false - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. - type: list - elements: str - required: false -extends_documentation_fragment: -- community.general.hwc - -''' - -EXAMPLES = ''' -# create subnet -- name: Create vpc - hwc_network_vpc: - cidr: "192.168.100.0/24" - name: "ansible_network_vpc_test" - register: vpc -- name: Create subnet - community.general.hwc_vpc_subnet: - vpc_id: "{{ vpc.id }}" - cidr: "192.168.100.0/26" - gateway_ip: "192.168.100.32" - name: "ansible_network_subnet_test" - dhcp_enable: True -''' - -RETURN = ''' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. - type: str - returned: success - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. - type: str - returned: success - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. - type: str - returned: success - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. - type: str - returned: success - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - returned: success - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. 
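The ordering rule for dns_address ("the address in the head will be used first") comes from how the module splits the list across the API's three DNS fields, as the expand_create_* helpers further down show; condensed into one illustrative function:

    def split_dns(dns_address):
        # First entry -> primary_dns, second -> secondary_dns; the whole list
        # is additionally sent as dnsList only when more than two entries exist
        # (mirrors expand_create_primary_dns/_secondary_dns/_dns_list below).
        dns_address = dns_address or []
        primary = dns_address[0] if dns_address else ''
        secondary = dns_address[1] if len(dns_address) > 1 else ''
        dns_list = dns_address if len(dns_address) > 2 else []
        return primary, secondary, dns_list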
- type: list - returned: success -''' - -from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) - - -def build_module(): - return HwcModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - update=dict(default='15m', type='str'), - ), default=dict()), - cidr=dict(type='str', required=True), - gateway_ip=dict(type='str', required=True), - name=dict(type='str', required=True), - vpc_id=dict(type='str', required=True), - availability_zone=dict(type='str'), - dhcp_enable=dict(type='bool'), - dns_address=dict(type='list', elements='str') - ), - supports_check_mode=True, - ) - - -def main(): - """Main function""" - - module = build_module() - config = Config(module, "vpc") - - try: - resource = None - if module.params.get('id'): - resource = True - else: - v = search_resource(config) - if len(v) > 1: - raise Exception("Found more than one resource(%s)" % ", ".join([ - navigate_value(i, ["id"]) for i in v])) - - if len(v) == 1: - resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) - - result = {} - changed = False - if module.params['state'] == 'present': - if resource is None: - if not module.check_mode: - create(config) - changed = True - - current = read_resource(config, exclude_output=True) - expect = user_input_parameters(module) - if are_different_dicts(expect, current): - if not module.check_mode: - update(config) - changed = True - - result = read_resource(config) - result['id'] = module.params.get('id') - else: - if resource: - if not module.check_mode: - delete(config) - changed = True - - except Exception as ex: - module.fail_json(msg=str(ex)) - - else: - result['changed'] = changed - module.exit_json(**result) - - -def user_input_parameters(module): - return { - "availability_zone": module.params.get("availability_zone"), - "cidr": module.params.get("cidr"), - "dhcp_enable": module.params.get("dhcp_enable"), - "dns_address": module.params.get("dns_address"), - "gateway_ip": module.params.get("gateway_ip"), - "name": module.params.get("name"), - "vpc_id": module.params.get("vpc_id"), - } - - -def create(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_create_parameters(opts) - r = send_create_request(module, params, client) - obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["subnet", "id"]) - - -def update(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) - opts = user_input_parameters(module) - - params = build_update_parameters(opts) - if params: - r = send_update_request(module, params, client) - async_wait_update(config, r, client, timeout) - - -def delete(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - send_delete_request(module, None, client) - - url = build_path(module, "subnets/{id}") - - def _refresh_status(): - try: - client.get(url) - except HwcClientException404: - return True, "Done" - - except Exception: - return None, "" - - return True, 
"Pending" - - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) - try: - wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_subnet): error " - "waiting for api(delete) to " - "be done, error= %s" % str(ex)) - - -def read_resource(config, exclude_output=False): - module = config.module - client = config.client(get_region(module), "vpc", "project") - - res = {} - - r = send_read_request(module, client) - res["read"] = fill_read_resp_body(r) - - return update_properties(module, res, None, exclude_output) - - -def _build_query_link(opts): - query_link = "?marker={marker}&limit=10" - v = navigate_value(opts, ["vpc_id"]) - if v: - query_link += "&vpc_id=" + str(v) - - return query_link - - -def search_resource(config): - module = config.module - client = config.client(get_region(module), "vpc", "project") - opts = user_input_parameters(module) - identity_obj = _build_identity_object(opts) - query_link = _build_query_link(opts) - link = "subnets" + query_link - - result = [] - p = {'marker': ''} - while True: - url = link.format(**p) - r = send_list_request(module, client, url) - if not r: - break - - for item in r: - item = fill_list_resp_body(item) - if not are_different_dicts(identity_obj, item): - result.append(item) - - if len(result) > 1: - break - - p['marker'] = r[-1].get('id') - - return result - - -def build_create_parameters(opts): - params = dict() - - v = navigate_value(opts, ["availability_zone"], None) - if not is_empty_value(v): - params["availability_zone"] = v - - v = navigate_value(opts, ["cidr"], None) - if not is_empty_value(v): - params["cidr"] = v - - v = navigate_value(opts, ["dhcp_enable"], None) - if v is not None: - params["dhcp_enable"] = v - - v = expand_create_dns_list(opts, None) - if not is_empty_value(v): - params["dnsList"] = v - - v = navigate_value(opts, ["gateway_ip"], None) - if not is_empty_value(v): - params["gateway_ip"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = expand_create_primary_dns(opts, None) - if not is_empty_value(v): - params["primary_dns"] = v - - v = expand_create_secondary_dns(opts, None) - if not is_empty_value(v): - params["secondary_dns"] = v - - v = navigate_value(opts, ["vpc_id"], None) - if not is_empty_value(v): - params["vpc_id"] = v - - if not params: - return params - - params = {"subnet": params} - - return params - - -def expand_create_dns_list(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v if (v and len(v) > 2) else [] - - -def expand_create_primary_dns(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v[0] if v else "" - - -def expand_create_secondary_dns(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v[1] if (v and len(v) > 1) else "" - - -def send_create_request(module, params, client): - url = "subnets" - try: - r = client.post(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(create), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_create(config, result, client, timeout): - module = config.module - - path_parameters = { - "subnet_id": ["subnet", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "subnets/{subnet_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, 
timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["subnet", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE"], - ["UNKNOWN"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_subnet): error " - "waiting for api(create) to " - "be done, error= %s" % str(ex)) - - -def build_update_parameters(opts): - params = dict() - - v = navigate_value(opts, ["dhcp_enable"], None) - if v is not None: - params["dhcp_enable"] = v - - v = expand_update_dns_list(opts, None) - if v is not None: - params["dnsList"] = v - - v = navigate_value(opts, ["name"], None) - if not is_empty_value(v): - params["name"] = v - - v = expand_update_primary_dns(opts, None) - if v is not None: - params["primary_dns"] = v - - v = expand_update_secondary_dns(opts, None) - if v is not None: - params["secondary_dns"] = v - - if not params: - return params - - params = {"subnet": params} - - return params - - -def expand_update_dns_list(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - if v: - if len(v) > 2: - return v - return None - return [] - - -def expand_update_primary_dns(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v[0] if v else "" - - -def expand_update_secondary_dns(d, array_index): - v = navigate_value(d, ["dns_address"], array_index) - return v[1] if (v and len(v) > 1) else "" - - -def send_update_request(module, params, client): - url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") - - try: - r = client.put(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(update), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def async_wait_update(config, result, client, timeout): - module = config.module - - path_parameters = { - "subnet_id": ["subnet", "id"], - } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) - - url = build_path(module, "subnets/{subnet_id}", data) - - def _query_status(): - r = None - try: - r = client.get(url, timeout=timeout) - except HwcClientException: - return None, "" - - try: - s = navigate_value(r, ["subnet", "status"]) - return r, s - except Exception: - return None, "" - - try: - return wait_to_finish( - ["ACTIVE"], - ["UNKNOWN"], - _query_status, timeout) - except Exception as ex: - module.fail_json(msg="module(hwc_vpc_subnet): error " - "waiting for api(update) to " - "be done, error= %s" % str(ex)) - - -def send_delete_request(module, params, client): - url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") - - try: - r = client.delete(url, params) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(delete), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return r - - -def send_read_request(module, client): - url = build_path(module, "subnets/{id}") - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(read), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["subnet"], None) - - -def fill_read_resp_body(body): - result = dict() - - result["availability_zone"] = body.get("availability_zone") - - result["cidr"] = body.get("cidr") - - result["dhcp_enable"] = body.get("dhcp_enable") - - result["dnsList"] = body.get("dnsList") - - result["gateway_ip"] = body.get("gateway_ip") - - result["id"] = body.get("id") - - 
result["name"] = body.get("name") - - result["neutron_network_id"] = body.get("neutron_network_id") - - result["neutron_subnet_id"] = body.get("neutron_subnet_id") - - result["primary_dns"] = body.get("primary_dns") - - result["secondary_dns"] = body.get("secondary_dns") - - result["status"] = body.get("status") - - result["vpc_id"] = body.get("vpc_id") - - return result - - -def update_properties(module, response, array_index, exclude_output=False): - r = user_input_parameters(module) - - v = navigate_value(response, ["read", "availability_zone"], array_index) - r["availability_zone"] = v - - v = navigate_value(response, ["read", "cidr"], array_index) - r["cidr"] = v - - v = navigate_value(response, ["read", "dhcp_enable"], array_index) - r["dhcp_enable"] = v - - v = navigate_value(response, ["read", "dnsList"], array_index) - r["dns_address"] = v - - v = navigate_value(response, ["read", "gateway_ip"], array_index) - r["gateway_ip"] = v - - v = navigate_value(response, ["read", "name"], array_index) - r["name"] = v - - v = navigate_value(response, ["read", "vpc_id"], array_index) - r["vpc_id"] = v - - return r - - -def send_list_request(module, client, url): - - r = None - try: - r = client.get(url) - except HwcClientException as ex: - msg = ("module(hwc_vpc_subnet): error running " - "api(list), error: %s" % str(ex)) - module.fail_json(msg=msg) - - return navigate_value(r, ["subnets"], None) - - -def _build_identity_object(all_opts): - result = dict() - - v = navigate_value(all_opts, ["availability_zone"], None) - result["availability_zone"] = v - - v = navigate_value(all_opts, ["cidr"], None) - result["cidr"] = v - - v = navigate_value(all_opts, ["dhcp_enable"], None) - result["dhcp_enable"] = v - - v = navigate_value(all_opts, ["dns_address"], None) - result["dnsList"] = v - - v = navigate_value(all_opts, ["gateway_ip"], None) - result["gateway_ip"] = v - - result["id"] = None - - v = navigate_value(all_opts, ["name"], None) - result["name"] = v - - result["neutron_network_id"] = None - - result["neutron_subnet_id"] = None - - result["primary_dns"] = None - - result["secondary_dns"] = None - - result["status"] = None - - v = navigate_value(all_opts, ["vpc_id"], None) - result["vpc_id"] = v - - return result - - -def fill_list_resp_body(body): - result = dict() - - result["availability_zone"] = body.get("availability_zone") - - result["cidr"] = body.get("cidr") - - result["dhcp_enable"] = body.get("dhcp_enable") - - result["dnsList"] = body.get("dnsList") - - result["gateway_ip"] = body.get("gateway_ip") - - result["id"] = body.get("id") - - result["name"] = body.get("name") - - result["neutron_network_id"] = body.get("neutron_network_id") - - result["neutron_subnet_id"] = body.get("neutron_subnet_id") - - result["primary_dns"] = body.get("primary_dns") - - result["secondary_dns"] = body.get("secondary_dns") - - result["status"] = body.get("status") - - result["vpc_id"] = body.get("vpc_id") - - return result - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py b/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py deleted file mode 100644 index c627fb70..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py +++ /dev/null @@ -1,688 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: linode -short_description: Manage instances on the Linode Public Cloud -description: - - Manage Linode Public Cloud instances and optionally wait for them to be 'running'. -options: - state: - description: - - Indicate desired state of the resource. - choices: [ absent, active, deleted, present, restarted, started, stopped ] - default: present - type: str - api_key: - description: - - Linode API key. - - C(LINODE_API_KEY) env variable can be used instead. - type: str - required: yes - name: - description: - - Name to give the instance (alphanumeric, dashes, underscore). - - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-). - required: true - type: str - displaygroup: - description: - - Add the instance to a Display Group in Linode Manager. - type: str - linode_id: - description: - - Unique ID of a Linode server. This value is read-only in the sense that - if you specify it on creation of a Linode it will not be used. The - Linode API generates these IDs and we can use those generated values here to - reference a Linode more specifically. This is useful for idempotence. - aliases: [ lid ] - type: int - additional_disks: - description: - - List of dictionaries for creating additional disks that are added to the Linode configuration settings. - - Dictionary takes Size, Label, Type. Size is in MB. - type: list - elements: dict - alert_bwin_enabled: - description: - - Set status of bandwidth in alerts. - type: bool - alert_bwin_threshold: - description: - - Set threshold in MB of bandwidth in alerts. - type: int - alert_bwout_enabled: - description: - - Set status of bandwidth out alerts. - type: bool - alert_bwout_threshold: - description: - - Set threshold in MB of bandwidth out alerts. - type: int - alert_bwquota_enabled: - description: - - Set status of bandwidth quota alerts as percentage of network transfer quota. - type: bool - alert_bwquota_threshold: - description: - - Set threshold in MB of bandwidth quota alerts. - type: int - alert_cpu_enabled: - description: - - Set status of receiving CPU usage alerts. - type: bool - alert_cpu_threshold: - description: - - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total. - type: int - alert_diskio_enabled: - description: - - Set status of receiving disk IO alerts. - type: bool - alert_diskio_threshold: - description: - - Set threshold for average IO ops/sec over 2 hour period. - type: int - backupsenabled: - description: - - Deprecated parameter, it will be removed in community.general C(5.0.0). - - To enable backups pass values to either I(backupweeklyday) or I(backupwindow). - type: int - backupweeklyday: - description: - - Day of the week to take backups. - type: int - backupwindow: - description: - - The time window in which backups will be taken. - type: int - plan: - description: - - Plan to use for the instance (Linode plan). - type: int - payment_term: - description: - - Payment term to use for the instance (payment term in months). - default: 1 - choices: [ 1, 12, 24 ] - type: int - password: - description: - - Root password to apply to a new server (auto generated if missing). - type: str - private_ip: - description: - - Add private IPv4 address when Linode is created. - - Default is C(false). 
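The C(LinodeID-) prefix mentioned under name above is applied after creation, once the API has allocated the ID (see the api.linode_update call in linodeServers below); the naming convention in isolation (linode_label is a hypothetical helper):

    def linode_label(linode_id, name):
        # Labels become "<LinodeID>-<name>", so a reused name still yields a
        # unique label in the Linode Manager.
        return '%s-%s' % (linode_id, name)

    linode_label(10480444, 'linode-test1')  # -> '10480444-linode-test1'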
- type: bool - ssh_pub_key: - description: - - SSH public key applied to root user - type: str - swap: - description: - - swap size in MB - default: 512 - type: int - distribution: - description: - - distribution to use for the instance (Linode Distribution) - type: int - datacenter: - description: - - datacenter to create an instance in (Linode Datacenter) - type: int - kernel_id: - description: - - kernel to use for the instance (Linode Kernel) - type: int - wait: - description: - - wait for the instance to be in state C(running) before returning - type: bool - default: true - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - type: int - watchdog: - description: - - Set status of Lassie watchdog. - type: bool - default: "True" -requirements: - - python >= 2.6 - - linode-python -author: -- Vincent Viallet (@zbal) -notes: - - Please note, linode-python does not have python 3 support. - - This module uses the now deprecated v3 of the Linode API. - - Please review U(https://www.linode.com/api/linode) for determining the required parameters. -''' - -EXAMPLES = ''' - -- name: Create a new Linode - community.general.linode: - name: linode-test1 - plan: 1 - datacenter: 7 - distribution: 129 - state: present - register: linode_creation - -- name: Create a server with a private IP Address - community.general.linode: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - private_ip: yes - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - delegate_to: localhost - register: linode_creation - -- name: Fully configure new server - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 4 - datacenter: 2 - distribution: 99 - kernel_id: 138 - password: 'superSecureRootPassword' - private_ip: yes - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - alert_bwquota_enabled: True - alert_bwquota_threshold: 80 - alert_bwin_enabled: True - alert_bwin_threshold: 10 - alert_cpu_enabled: True - alert_cpu_threshold: 210 - alert_bwout_enabled: True - alert_bwout_threshold: 10 - alert_diskio_enabled: True - alert_diskio_threshold: 10000 - backupweeklyday: 1 - backupwindow: 2 - displaygroup: 'test' - additional_disks: - - {Label: 'disk1', Size: 2500, Type: 'raw'} - - {Label: 'newdisk', Size: 2000} - watchdog: True - delegate_to: localhost - register: linode_creation - -- name: Ensure a running server (create if missing) - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - delegate_to: localhost - register: linode_creation - -- name: Delete a server - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: absent - delegate_to: localhost - -- name: Stop a server - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: stopped - delegate_to: localhost - -- name: Reboot a server - community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: restarted - delegate_to: localhost -''' - -import time -import 
traceback - -LINODE_IMP_ERR = None -try: - from linode import api as linode_api - HAS_LINODE = True -except ImportError: - LINODE_IMP_ERR = traceback.format_exc() - HAS_LINODE = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback - - -def randompass(): - ''' - Generate a long random password that comply to Linode requirements - ''' - # Linode API currently requires the following: - # It must contain at least two of these four character classes: - # lower case letters - upper case letters - numbers - punctuation - # we play it safe :) - import random - import string - # as of python 2.4, this reseeds the PRNG from urandom - random.seed() - lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6)) - upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) - number = ''.join(random.choice(string.digits) for x in range(6)) - punct = ''.join(random.choice(string.punctuation) for x in range(6)) - p = lower + upper + number + punct - return ''.join(random.sample(p, len(p))) - - -def getInstanceDetails(api, server): - ''' - Return the details of an instance, populating IPs, etc. - ''' - instance = {'id': server['LINODEID'], - 'name': server['LABEL'], - 'public': [], - 'private': []} - - # Populate with ips - for ip in api.linode_ip_list(LinodeId=server['LINODEID']): - if ip['ISPUBLIC'] and 'ipv4' not in instance: - instance['ipv4'] = ip['IPADDRESS'] - instance['fqdn'] = ip['RDNS_NAME'] - if ip['ISPUBLIC']: - instance['public'].append({'ipv4': ip['IPADDRESS'], - 'fqdn': ip['RDNS_NAME'], - 'ip_id': ip['IPADDRESSID']}) - else: - instance['private'].append({'ipv4': ip['IPADDRESS'], - 'fqdn': ip['RDNS_NAME'], - 'ip_id': ip['IPADDRESSID']}) - return instance - - -def linodeServers(module, api, state, name, - displaygroup, plan, additional_disks, distribution, - datacenter, kernel_id, linode_id, payment_term, password, - private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs): - instances = [] - changed = False - new_server = False - servers = [] - disks = [] - configs = [] - jobs = [] - - # See if we can match an existing server details with the provided linode_id - if linode_id: - # For the moment we only consider linode_id as criteria for match - # Later we can use more (size, name, etc.) and update existing - servers = api.linode_list(LinodeId=linode_id) - # Attempt to fetch details about disks and configs only if servers are - # found with linode_id - if servers: - disks = api.linode_disk_list(LinodeId=linode_id) - configs = api.linode_config_list(LinodeId=linode_id) - - # Act on the state - if state in ('active', 'present', 'started'): - # TODO: validate all the plan / distribution / datacenter are valid - - # Multi step process/validation: - # - need linode_id (entity) - # - need disk_id for linode_id - create disk from distrib - # - need config_id for linode_id - create config (need kernel) - - # Any create step triggers a job that need to be waited for. 
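randompass() above draws six characters from each of the four required classes and shuffles them; on Python 3 the same idea is usually written with the secrets module instead of reseeding the global PRNG. A sketch, not the module's code:

    import secrets
    import string

    def randompass_secure():
        # Six characters from each class Linode requires, shuffled with a
        # CSPRNG rather than the reseeded global PRNG used above.
        pools = (string.ascii_lowercase, string.ascii_uppercase,
                 string.digits, string.punctuation)
        chars = [secrets.choice(pool) for pool in pools for _ in range(6)]
        secrets.SystemRandom().shuffle(chars)
        return ''.join(chars)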
- if not servers: - for arg in (name, plan, distribution, datacenter): - if not arg: - module.fail_json(msg='%s is required for %s state' % (arg, state)) - # Create linode entity - new_server = True - - # Get size of all individually listed disks to subtract from Distribution disk - used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks) - - try: - res = api.linode_create(DatacenterID=datacenter, PlanID=plan, - PaymentTerm=payment_term) - linode_id = res['LinodeID'] - # Update linode Label to match name - api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name)) - # Update Linode with Ansible configuration options - api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs) - # Save server - servers = api.linode_list(LinodeId=linode_id) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) - - # Add private IP to Linode - if private_ip: - try: - res = api.linode_ip_addprivate(LinodeID=linode_id) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - - if not disks: - for arg in (name, linode_id, distribution): - if not arg: - module.fail_json(msg='%s is required for %s state' % (arg, state)) - # Create disks (1 from distrib, 1 for SWAP) - new_server = True - try: - if not password: - # Password is required on creation, if not provided generate one - password = randompass() - if not swap: - swap = 512 - # Create data disk - size = servers[0]['TOTALHD'] - used_disk_space - swap - - if ssh_pub_key: - res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, - rootPass=password, rootSSHKey=ssh_pub_key, - Label='%s data disk (lid: %s)' % (name, linode_id), - Size=size) - else: - res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, - rootPass=password, - Label='%s data disk (lid: %s)' % (name, linode_id), - Size=size) - jobs.append(res['JobID']) - # Create SWAP disk - res = api.linode_disk_create(LinodeId=linode_id, Type='swap', - Label='%s swap disk (lid: %s)' % (name, linode_id), - Size=swap) - # Create individually listed disks at specified size - if additional_disks: - for disk in additional_disks: - # If a disk Type is not passed in, default to ext4 - if disk.get('Type') is None: - disk['Type'] = 'ext4' - res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type']) - - jobs.append(res['JobID']) - except Exception as e: - # TODO: destroy linode ? 
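The data-disk size computed above is whatever the plan's total disk leaves after the explicitly listed disks and swap are subtracted; a worked example with hypothetical numbers:

    # Hypothetical figures to illustrate the sizing arithmetic above.
    TOTALHD = 49152                    # plan's total disk, in MB
    additional_disks = [{'Label': 'disk1', 'Size': 2500, 'Type': 'raw'},
                        {'Label': 'newdisk', 'Size': 2000}]
    used_disk_space = sum(disk['Size'] for disk in additional_disks)  # 4500
    swap = 512
    size = TOTALHD - used_disk_space - swap                           # 44140 MB for the root disk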
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - - if not configs: - for arg in (name, linode_id, distribution): - if not arg: - module.fail_json(msg='%s is required for %s state' % (arg, state)) - - # Check architecture - for distrib in api.avail_distributions(): - if distrib['DISTRIBUTIONID'] != distribution: - continue - arch = '32' - if distrib['IS64BIT']: - arch = '64' - break - - # Get latest kernel matching arch if kernel_id is not specified - if not kernel_id: - for kernel in api.avail_kernels(): - if not kernel['LABEL'].startswith('Latest %s' % arch): - continue - kernel_id = kernel['KERNELID'] - break - - # Get disk list - disks_id = [] - for disk in api.linode_disk_list(LinodeId=linode_id): - if disk['TYPE'] == 'ext3': - disks_id.insert(0, str(disk['DISKID'])) - continue - disks_id.append(str(disk['DISKID'])) - # Trick to get the 9 items in the list - while len(disks_id) < 9: - disks_id.append('') - disks_list = ','.join(disks_id) - - # Create config - new_server = True - try: - api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, - Disklist=disks_list, Label='%s config' % name) - configs = api.linode_config_list(LinodeId=linode_id) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - - # Start / Ensure servers are running - for server in servers: - # Refresh server state - server = api.linode_list(LinodeId=server['LINODEID'])[0] - # Ensure existing servers are up and running, boot if necessary - if server['STATUS'] != 1: - res = api.linode_boot(LinodeId=linode_id) - jobs.append(res['JobID']) - changed = True - - # wait here until the instances are up - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time(): - # refresh the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - # status: - # -2: Boot failed - # 1: Running - if server['STATUS'] in (-2, 1): - break - time.sleep(5) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID'])) - # Get a fresh copy of the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - if server['STATUS'] == -2: - module.fail_json(msg='%s (lid: %s) failed to boot' % - (server['LABEL'], server['LINODEID'])) - # From now on we know the task is a success - # Build instance report - instance = getInstanceDetails(api, server) - # depending on wait flag select the status - if wait: - instance['status'] = 'Running' - else: - instance['status'] = 'Starting' - - # Return the root password if this is a new box and no SSH key - # has been provided - if new_server and not ssh_pub_key: - instance['password'] = password - instances.append(instance) - - elif state in ('stopped',): - if not servers: - module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) - - for server in servers: - instance = getInstanceDetails(api, server) - if server['STATUS'] != 2: - try: - res = api.linode_shutdown(LinodeId=linode_id) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - instance['status'] = 'Stopping' - changed = True - else: - instance['status'] = 'Stopped' - instances.append(instance) - - elif state in ('restarted',): - if not servers: - module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) - - for server in servers: - instance = getInstanceDetails(api, server) - try: - res = 
api.linode_reboot(LinodeId=server['LINODEID']) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - instance['status'] = 'Restarting' - changed = True - instances.append(instance) - - elif state in ('absent', 'deleted'): - for server in servers: - instance = getInstanceDetails(api, server) - try: - api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - instance['status'] = 'Deleting' - changed = True - instances.append(instance) - - # Ease parsing if only 1 instance - if len(instances) == 1: - module.exit_json(changed=changed, instance=instances[0]) - - module.exit_json(changed=changed, instances=instances) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', - choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']), - api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])), - name=dict(type='str', required=True), - alert_bwin_enabled=dict(type='bool'), - alert_bwin_threshold=dict(type='int'), - alert_bwout_enabled=dict(type='bool'), - alert_bwout_threshold=dict(type='int'), - alert_bwquota_enabled=dict(type='bool'), - alert_bwquota_threshold=dict(type='int'), - alert_cpu_enabled=dict(type='bool'), - alert_cpu_threshold=dict(type='int'), - alert_diskio_enabled=dict(type='bool'), - alert_diskio_threshold=dict(type='int'), - backupsenabled=dict(type='int', removed_in_version='5.0.0', removed_from_collection='community.general'), - backupweeklyday=dict(type='int'), - backupwindow=dict(type='int'), - displaygroup=dict(type='str', default=''), - plan=dict(type='int'), - additional_disks=dict(type='list', elements='dict'), - distribution=dict(type='int'), - datacenter=dict(type='int'), - kernel_id=dict(type='int'), - linode_id=dict(type='int', aliases=['lid']), - payment_term=dict(type='int', default=1, choices=[1, 12, 24]), - password=dict(type='str', no_log=True), - private_ip=dict(type='bool'), - ssh_pub_key=dict(type='str'), - swap=dict(type='int', default=512), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300), - watchdog=dict(type='bool', default=True), - ), - required_if=[ - ('state', 'restarted', ['linode_id']), - ('state', 'stopped', ['linode_id']), - ] - ) - - if not HAS_LINODE: - module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR) - - state = module.params.get('state') - api_key = module.params.get('api_key') - name = module.params.get('name') - alert_bwin_enabled = module.params.get('alert_bwin_enabled') - alert_bwin_threshold = module.params.get('alert_bwin_threshold') - alert_bwout_enabled = module.params.get('alert_bwout_enabled') - alert_bwout_threshold = module.params.get('alert_bwout_threshold') - alert_bwquota_enabled = module.params.get('alert_bwquota_enabled') - alert_bwquota_threshold = module.params.get('alert_bwquota_threshold') - alert_cpu_enabled = module.params.get('alert_cpu_enabled') - alert_cpu_threshold = module.params.get('alert_cpu_threshold') - alert_diskio_enabled = module.params.get('alert_diskio_enabled') - alert_diskio_threshold = module.params.get('alert_diskio_threshold') - backupweeklyday = module.params.get('backupweeklyday') - backupwindow = module.params.get('backupwindow') - displaygroup = module.params.get('displaygroup') - plan = module.params.get('plan') - 
additional_disks = module.params.get('additional_disks') - distribution = module.params.get('distribution') - datacenter = module.params.get('datacenter') - kernel_id = module.params.get('kernel_id') - linode_id = module.params.get('linode_id') - payment_term = module.params.get('payment_term') - password = module.params.get('password') - private_ip = module.params.get('private_ip') - ssh_pub_key = module.params.get('ssh_pub_key') - swap = module.params.get('swap') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - watchdog = int(module.params.get('watchdog')) - - check_items = dict( - alert_bwin_enabled=alert_bwin_enabled, - alert_bwin_threshold=alert_bwin_threshold, - alert_bwout_enabled=alert_bwout_enabled, - alert_bwout_threshold=alert_bwout_threshold, - alert_bwquota_enabled=alert_bwquota_enabled, - alert_bwquota_threshold=alert_bwquota_threshold, - alert_cpu_enabled=alert_cpu_enabled, - alert_cpu_threshold=alert_cpu_threshold, - alert_diskio_enabled=alert_diskio_enabled, - alert_diskio_threshold=alert_diskio_threshold, - backupweeklyday=backupweeklyday, - backupwindow=backupwindow, - ) - - kwargs = dict((k, v) for k, v in check_items.items() if v is not None) - - # setup the auth - try: - api = linode_api.Api(api_key) - api.test_echo() - except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) - - linodeServers(module, api, state, name, - displaygroup, plan, - additional_disks, distribution, datacenter, kernel_id, linode_id, - payment_term, password, private_ip, ssh_pub_key, swap, wait, - wait_timeout, watchdog, **kwargs) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py b/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py deleted file mode 100644 index fcf3725b..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: linode_v4 -short_description: Manage instances on the Linode cloud. -description: Manage instances on the Linode cloud. -requirements: - - python >= 2.7 - - linode_api4 >= 2.0.0 -author: - - Luke Murphy (@decentral1se) -notes: - - No Linode resizing is currently implemented. This module will, in time, - replace the current Linode module which uses deprecated API bindings on the - Linode side. -options: - region: - description: - - The region of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/regions/). - type: str - image: - description: - - The image of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/images/). - type: str - type: - description: - - The type of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/linode-types/). - type: str - label: - description: - - The instance label. This label is used as the main determiner for - idempotence for the module and is therefore mandatory. 
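The check_items/kwargs step in main() above is a compact way to forward only the alert and backup options the user actually set, so unset options are never pushed to the API as explicit nulls; the pattern in isolation (values hypothetical):

    check_items = {'alert_cpu_enabled': True, 'alert_cpu_threshold': None,
                   'backupwindow': 2, 'backupweeklyday': None}
    # Keep only the options that were actually provided.
    kwargs = {k: v for k, v in check_items.items() if v is not None}
    # kwargs == {'alert_cpu_enabled': True, 'backupwindow': 2}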
- type: str - required: true - group: - description: - - The group that the instance should be marked under. Please note, that - group labelling is deprecated but still supported. The encouraged - method for marking instances is to use tags. - type: str - private_ip: - description: - - If C(true), the created Linode will have private networking enabled and - assigned a private IPv4 address. - type: bool - default: false - version_added: 3.0.0 - tags: - description: - - The tags that the instance should be marked under. See - U(https://www.linode.com/docs/api/tags/). - type: list - elements: str - root_pass: - description: - - The password for the root user. If not specified, one will be - generated. This generated password will be available in the task - success JSON. - type: str - authorized_keys: - description: - - A list of SSH public key parts to deploy for the root user. - type: list - elements: str - state: - description: - - The desired instance state. - type: str - choices: - - present - - absent - required: true - access_token: - description: - - The Linode API v4 access token. It may also be specified by exposing - the C(LINODE_ACCESS_TOKEN) environment variable. See - U(https://www.linode.com/docs/api#access-and-authentication). - required: true - type: str - stackscript_id: - description: - - The numeric ID of the StackScript to use when creating the instance. - See U(https://www.linode.com/docs/api/stackscripts/). - type: int - version_added: 1.3.0 - stackscript_data: - description: - - An object containing arguments to any User Defined Fields present in - the StackScript used when creating the instance. - Only valid when a stackscript_id is provided. - See U(https://www.linode.com/docs/api/stackscripts/). - type: dict - version_added: 1.3.0 -''' - -EXAMPLES = """ -- name: Create a new Linode. - community.general.linode_v4: - label: new-linode - type: g6-nanode-1 - region: eu-west - image: linode/debian9 - root_pass: passw0rd - authorized_keys: - - "ssh-rsa ..." - stackscript_id: 1337 - stackscript_data: - variable: value - state: present - -- name: Delete that new Linode. - community.general.linode_v4: - label: new-linode - state: absent -""" - -RETURN = """ -instance: - description: The instance description in JSON serialized form. - returned: Always. 
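Because label is the sole idempotence key, a presence check reduces to an exact-label lookup; a sketch mirroring the module's own maybe_instance_from_label() further down (find_by_label is a hypothetical name, but the client call is the one the module itself uses):

    from linode_api4 import Instance

    def find_by_label(client, label):
        # An empty result means the instance does not exist yet.
        try:
            return client.linode.instances(Instance.label == label)[0]
        except IndexError:
            return None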
- type: dict - sample: { - "root_pass": "foobar", # if auto-generated - "alerts": { - "cpu": 90, - "io": 10000, - "network_in": 10, - "network_out": 10, - "transfer_quota": 80 - }, - "backups": { - "enabled": false, - "schedule": { - "day": null, - "window": null - } - }, - "created": "2018-09-26T08:12:33", - "group": "Foobar Group", - "hypervisor": "kvm", - "id": 10480444, - "image": "linode/centos7", - "ipv4": [ - "130.132.285.233" - ], - "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64", - "label": "lin-foo", - "region": "eu-west", - "specs": { - "disk": 25600, - "memory": 1024, - "transfer": 1000, - "vcpus": 1 - }, - "status": "running", - "tags": [], - "type": "g6-nanode-1", - "updated": "2018-09-26T10:10:14", - "watchdog_enabled": true - } -""" - -import traceback - -from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent - -LINODE_IMP_ERR = None -try: - from linode_api4 import Instance, LinodeClient - HAS_LINODE_DEPENDENCY = True -except ImportError: - LINODE_IMP_ERR = traceback.format_exc() - HAS_LINODE_DEPENDENCY = False - - -def create_linode(module, client, **kwargs): - """Creates a Linode instance and handles return format.""" - if kwargs['root_pass'] is None: - kwargs.pop('root_pass') - - try: - response = client.linode.instance_create(**kwargs) - except Exception as exception: - module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) - - try: - if isinstance(response, tuple): - instance, root_pass = response - instance_json = instance._raw_json - instance_json.update({'root_pass': root_pass}) - return instance_json - else: - return response._raw_json - except TypeError: - module.fail_json(msg='Unable to parse Linode instance creation response. Please raise a bug against this' - ' module on https://github.com/ansible-collections/community.general/issues' - ) - - -def maybe_instance_from_label(module, client): - """Try to retrieve an instance based on a label.""" - try: - label = module.params['label'] - result = client.linode.instances(Instance.label == label) - return result[0] - except IndexError: - return None - except Exception as exception: - module.fail_json(msg='Unable to query the Linode API. 
Saw: %s' % exception) - - -def initialise_module(): - """Initialise the module parameter specification.""" - return AnsibleModule( - argument_spec=dict( - label=dict(type='str', required=True), - state=dict( - type='str', - required=True, - choices=['present', 'absent'] - ), - access_token=dict( - type='str', - required=True, - no_log=True, - fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), - ), - authorized_keys=dict(type='list', elements='str', no_log=False), - group=dict(type='str'), - image=dict(type='str'), - private_ip=dict(type='bool', default=False), - region=dict(type='str'), - root_pass=dict(type='str', no_log=True), - tags=dict(type='list', elements='str'), - type=dict(type='str'), - stackscript_id=dict(type='int'), - stackscript_data=dict(type='dict'), - ), - supports_check_mode=False, - required_one_of=( - ['state', 'label'], - ), - required_together=( - ['region', 'image', 'type'], - ) - ) - - -def build_client(module): - """Build a LinodeClient.""" - return LinodeClient( - module.params['access_token'], - user_agent=get_user_agent('linode_v4_module') - ) - - -def main(): - """Module entrypoint.""" - module = initialise_module() - - if not HAS_LINODE_DEPENDENCY: - module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR) - - client = build_client(module) - instance = maybe_instance_from_label(module, client) - - if module.params['state'] == 'present' and instance is not None: - module.exit_json(changed=False, instance=instance._raw_json) - - elif module.params['state'] == 'present' and instance is None: - instance_json = create_linode( - module, client, - authorized_keys=module.params['authorized_keys'], - group=module.params['group'], - image=module.params['image'], - label=module.params['label'], - private_ip=module.params['private_ip'], - region=module.params['region'], - root_pass=module.params['root_pass'], - tags=module.params['tags'], - ltype=module.params['type'], - stackscript=module.params['stackscript_id'], - stackscript_data=module.params['stackscript_data'], - ) - module.exit_json(changed=True, instance=instance_json) - - elif module.params['state'] == 'absent' and instance is not None: - instance.delete() - module.exit_json(changed=True, instance=instance._raw_json) - - elif module.params['state'] == 'absent' and instance is None: - module.exit_json(changed=False, instance={}) - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py b/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py deleted file mode 100644 index c8c577ab..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py +++ /dev/null @@ -1,1743 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Kevin Carter -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: lxc_container -short_description: Manage LXC Containers -description: - - Management of LXC containers. -author: "Kevin Carter (@cloudnull)" -options: - name: - description: - - Name of a container. - type: str - required: true - backing_store: - choices: - - dir - - lvm - - loop - - btrfs - - overlayfs - - zfs - description: - - Backend storage type for the container. - type: str - default: dir - template: - description: - - Name of the template to use within an LXC create. 
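The main() entrypoint of linode_v4 above is a four-way branch on the requested state and on whether the label lookup found anything; condensed into a truth table (plan_action is illustrative only):

    def plan_action(state, found):
        # present + found   -> no-op  (changed=False, report instance)
        # present + missing -> create (changed=True)
        # absent  + found   -> delete (changed=True)
        # absent  + missing -> no-op  (changed=False, instance={})
        if state == 'present':
            return ('noop', False) if found else ('create', True)
        return ('delete', True) if found else ('noop', False)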
-    type: str
-    default: ubuntu
-  template_options:
-    description:
-      - Template options when building the container.
-    type: str
-  config:
-    description:
-      - Path to the LXC configuration file.
-    type: path
-  lv_name:
-    description:
-      - Name of the logical volume, defaults to the container name.
-      - If not specified, it defaults to C($CONTAINER_NAME).
-    type: str
-  vg_name:
-    description:
-      - If backend store is lvm, specify the name of the volume group.
-    type: str
-    default: lxc
-  thinpool:
-    description:
-      - Use LVM thin pool called TP.
-    type: str
-  fs_type:
-    description:
-      - Create fstype TYPE.
-    type: str
-    default: ext4
-  fs_size:
-    description:
-      - File system size.
-    type: str
-    default: 5G
-  directory:
-    description:
-      - Place rootfs directory under DIR.
-    type: path
-  zfs_root:
-    description:
-      - Create zfs under given zfsroot.
-    type: str
-  container_command:
-    description:
-      - Run a command within a container.
-    type: str
-  lxc_path:
-    description:
-      - Place container under PATH.
-    type: path
-  container_log:
-    description:
-      - Enable a container log for host actions to the container.
-    type: bool
-    default: 'no'
-  container_log_level:
-    choices:
-      - Info
-      - info
-      - INFO
-      - Error
-      - error
-      - ERROR
-      - Debug
-      - debug
-      - DEBUG
-    description:
-      - Set the log level for a container where *container_log* was set.
-    type: str
-    required: false
-    default: INFO
-  clone_name:
-    description:
-      - Name of the new cloned server.
-      - This is only used when state is clone.
-    type: str
-  clone_snapshot:
-    description:
-      - Create a snapshot of a container when cloning.
-      - This is not supported by all container storage backends.
-      - Enabling this may fail if the backing store does not support snapshots.
-    type: bool
-    default: 'no'
-  archive:
-    description:
-      - Create an archive of a container.
-      - This will create a tarball of the running container.
-    type: bool
-    default: 'no'
-  archive_path:
-    description:
-      - Path to save the archived container.
-      - If the path does not exist the archive method will attempt to create it.
-    type: path
-  archive_compression:
-    choices:
-      - gzip
-      - bzip2
-      - none
-    description:
-      - Type of compression to use when creating an archive of a running
-        container.
-    type: str
-    default: gzip
-  state:
-    choices:
-      - started
-      - stopped
-      - restarted
-      - absent
-      - frozen
-      - clone
-    description:
-      - Define the state of a container.
-      - If you clone a container using I(clone_name) the newly cloned
-        container is created in a stopped state.
-      - The running container will be stopped while the clone operation is
-        happening and upon completion of the clone the original container
-        state will be restored.
-    type: str
-    default: started
-  container_config:
-    description:
-      - A list of C(key=value) options to use when configuring a container.
-    type: list
-    elements: str
-requirements:
-  - 'lxc >= 1.0 # OS package'
-  - 'python >= 2.6 # OS Package'
-  - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
-notes:
-  - Containers must have a unique name. If you attempt to create a container
-    with a name that already exists in the user's namespace the module will
-    simply return as "unchanged".
-  - The "container_command" can be used with any state except "absent". If
-    used with state "stopped" the container will be "started", the command
-    executed, and then the container "stopped" again. Likewise if the state
-    is "stopped" and the container does not exist it will be first created,
-    "started", the command executed, and then "stopped". If you use a "|"
-    in the variable you can use common script formatting within the variable
-    itself. The "container_command" option will always execute as BASH.
-    When using "container_command" a log file is created in the /tmp/ directory
-    which contains both stdout and stderr of any command executed.
-  - If "archive" is **true** the system will attempt to create a compressed
-    tarball of the running container. The "archive" option supports LVM backed
-    containers and will create a snapshot of the running container when
-    creating the archive.
-  - If your distro does not have a package for "python2-lxc", which is a
-    requirement for this module, it can be installed from source at
-    "https://github.com/lxc/python2-lxc" or installed via pip using the package
-    name lxc-python2.
-'''
-
-EXAMPLES = r"""
-- name: Create a started container
-  community.general.lxc_container:
-    name: test-container-started
-    container_log: true
-    template: ubuntu
-    state: started
-    template_options: --release trusty
-
-- name: Create a stopped container
-  community.general.lxc_container:
-    name: test-container-stopped
-    container_log: true
-    template: ubuntu
-    state: stopped
-    template_options: --release trusty
-
-- name: Create a frozen container
-  community.general.lxc_container:
-    name: test-container-frozen
-    container_log: true
-    template: ubuntu
-    state: frozen
-    template_options: --release trusty
-    container_command: |
-      echo 'hello world.' | tee /opt/started-frozen
-
-# Create a filesystem container, configure it, archive it, and start it.
-- name: Create filesystem container
-  community.general.lxc_container:
-    name: test-container-config
-    backing_store: dir
-    container_log: true
-    template: ubuntu
-    state: started
-    archive: true
-    archive_compression: none
-    container_config:
-      - "lxc.aa_profile=unconfined"
-      - "lxc.cgroup.devices.allow=a *:* rmw"
-    template_options: --release trusty
-
-# Create an lvm container, run a complex command in it, add additional
-# configuration to it, create an archive of it, and finally leave the container
-# in a frozen state. The container archive will be compressed using bzip2
-- name: Create a frozen lvm container
-  community.general.lxc_container:
-    name: test-container-lvm
-    container_log: true
-    template: ubuntu
-    state: frozen
-    backing_store: lvm
-    template_options: --release trusty
-    container_command: |
-      apt-get update
-      apt-get install -y vim lxc-dev
-      echo 'hello world.' | tee /opt/started
-      if [[ -f "/opt/started" ]]; then
-        echo 'hello world.' | tee /opt/found-started
-      fi
-    container_config:
-      - "lxc.aa_profile=unconfined"
-      - "lxc.cgroup.devices.allow=a *:* rmw"
-    archive: true
-    archive_compression: bzip2
-  register: lvm_container_info
-
-- name: Debug info on container "test-container-lvm"
-  ansible.builtin.debug:
-    var: lvm_container_info
-
-- name: Run a command in a container and ensure it is in a "stopped" state.
-  community.general.lxc_container:
-    name: test-container-started
-    state: stopped
-    container_command: |
-      echo 'hello world.' | tee /opt/stopped
-
-- name: Run a command in a container and ensure it is in a "frozen" state.
-  community.general.lxc_container:
-    name: test-container-stopped
-    state: frozen
-    container_command: |
-      echo 'hello world.' | tee /opt/frozen
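-
-# Note: a multi-line container_command is executed inside the container as a
-# bash script; its stdout and stderr are captured in log files under /tmp/
-# (see the notes section above).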
-
-- name: Start a container
-  community.general.lxc_container:
-    name: test-container-stopped
-    state: started
-
-- name: Run a command in a container and then restart it
-  community.general.lxc_container:
-    name: test-container-started
-    state: restarted
-    container_command: |
-      echo 'hello world.' | tee /opt/restarted
-
-- name: Run a complex command within a "running" container
-  community.general.lxc_container:
-    name: test-container-started
-    container_command: |
-      apt-get update
-      apt-get install -y curl wget vim apache2
-      echo 'hello world.' | tee /opt/started
-      if [[ -f "/opt/started" ]]; then
-        echo 'hello world.' | tee /opt/found-started
-      fi
-
-# Create an archive of an existing container, save the archive to a defined
-# path and then destroy it.
-- name: Archive container
-  community.general.lxc_container:
-    name: test-container-started
-    state: absent
-    archive: true
-    archive_path: /opt/archives
-
-# Create a container using overlayfs, create an archive of it, create a
-# snapshot clone of the container and finally leave the container
-# in a frozen state. The container archive will be compressed using gzip.
-- name: Create an overlayfs container archive and clone it
-  community.general.lxc_container:
-    name: test-container-overlayfs
-    container_log: true
-    template: ubuntu
-    state: started
-    backing_store: overlayfs
-    template_options: --release trusty
-    clone_snapshot: true
-    clone_name: test-container-overlayfs-clone-snapshot
-    archive: true
-    archive_compression: gzip
-  register: clone_container_info
-
-- name: Debug info on container "test-container"
-  ansible.builtin.debug:
-    var: clone_container_info
-
-- name: Clone a container using snapshot
-  community.general.lxc_container:
-    name: test-container-overlayfs-clone-snapshot
-    backing_store: overlayfs
-    clone_name: test-container-overlayfs-clone-snapshot2
-    clone_snapshot: true
-
-- name: Create a new container and clone it
-  community.general.lxc_container:
-    name: test-container-new-archive
-    backing_store: dir
-    clone_name: test-container-new-archive-clone
-
-- name: Archive and clone a container then destroy it
-  community.general.lxc_container:
-    name: test-container-new-archive
-    state: absent
-    clone_name: test-container-new-archive-destroyed-clone
-    archive: true
-    archive_compression: gzip
-
-- name: Start a cloned container.
-  community.general.lxc_container:
-    name: test-container-new-archive-destroyed-clone
-    state: started
-
-- name: Destroy a container
-  community.general.lxc_container:
-    name: '{{ item }}'
-    state: absent
-  with_items:
-    - test-container-stopped
-    - test-container-started
-    - test-container-frozen
-    - test-container-lvm
-    - test-container-config
-    - test-container-overlayfs
-    - test-container-overlayfs-clone
-    - test-container-overlayfs-clone-snapshot
-    - test-container-overlayfs-clone-snapshot2
-    - test-container-new-archive
-    - test-container-new-archive-clone
-    - test-container-new-archive-destroyed-clone
-"""
-
-RETURN = r"""
-lxc_container:
-  description: container information
-  returned: success
-  type: complex
-  contains:
-    name:
-      description: name of the lxc container
-      returned: success
-      type: str
-      sample: test_host
-    init_pid:
-      description: pid of the lxc init process
-      returned: success
-      type: int
-      sample: 19786
-    interfaces:
-      description: list of the container's network interfaces
-      returned: success
-      type: list
-      sample: [ "eth0", "lo" ]
-    ips:
-      description: list of ips
-      returned: success
-      type: list
-      sample: [ "10.0.3.3" ]
-    state:
-      description: resulting state of the container
-      returned: success
-      type: str
-      sample: "running"
-    archive:
-      description: path to the archive of the container
-      returned: success, when archive is true
-      type: str
-      sample: "/tmp/test-container-config.tar"
-    clone:
-      description: if the container was cloned
-      returned: success, when clone_name is specified
-      type: bool
-      sample: True
-"""
-
-import os
-import os.path
-import re
-import shutil
-import subprocess
-import tempfile
-import time
-import shlex
-
-try:
-    import lxc
-except ImportError:
-    HAS_LXC = False
-else:
-    HAS_LXC = True
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_text, to_bytes
-
-
-# LXC_COMPRESSION_MAP is a map of available compression types when creating
-# an archive of a container.
-LXC_COMPRESSION_MAP = {
-    'gzip': {
-        'extension': 'tar.tgz',
-        'argument': '-czf'
-    },
-    'bzip2': {
-        'extension': 'tar.bz2',
-        'argument': '-cjf'
-    },
-    'none': {
-        'extension': 'tar',
-        'argument': '-cf'
-    }
-}
-
-
-# LXC_COMMAND_MAP is a map of variables that are available to a method based
-# on the state the container is in.
-LXC_COMMAND_MAP = {
-    'create': {
-        'variables': {
-            'config': '--config',
-            'template': '--template',
-            'backing_store': '--bdev',
-            'lxc_path': '--lxcpath',
-            'lv_name': '--lvname',
-            'vg_name': '--vgname',
-            'thinpool': '--thinpool',
-            'fs_type': '--fstype',
-            'fs_size': '--fssize',
-            'directory': '--dir',
-            'zfs_root': '--zfsroot'
-        }
-    },
-    'clone': {
-        'variables-lxc-copy': {
-            'backing_store': '--backingstorage',
-            'lxc_path': '--lxcpath',
-            'fs_size': '--fssize',
-            'name': '--name',
-            'clone_name': '--newname'
-        },
-        # lxc-clone is deprecated in favor of lxc-copy
-        'variables-lxc-clone': {
-            'backing_store': '--backingstore',
-            'lxc_path': '--lxcpath',
-            'fs_size': '--fssize',
-            'name': '--orig',
-            'clone_name': '--new'
-        }
-    }
-}
-
-
-# LXC_BACKING_STORE is a map of available storage backends and options that
-# are incompatible with the given storage backend.
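-# A minimal illustration (not part of the original module) of how this map is
-# consumed: ``_get_vars`` below drops every module parameter that is
-# incompatible with the selected backing store, e.g. for ``lvm``:
-#
-#   params = {'zfs_root': 'tank/lxc', 'fs_size': '5G'}
-#   kept = {k: v for k, v in params.items()
-#           if k not in LXC_BACKING_STORE['lvm']}
-#   # kept == {'fs_size': '5G'}, since 'zfs_root' is incompatible with lvm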
-LXC_BACKING_STORE = { - 'dir': [ - 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' - ], - 'lvm': [ - 'zfs_root' - ], - 'btrfs': [ - 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size' - ], - 'loop': [ - 'lv_name', 'vg_name', 'thinpool', 'zfs_root' - ], - 'overlayfs': [ - 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root' - ], - 'zfs': [ - 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' - ] -} - - -# LXC_LOGGING_LEVELS is a map of available log levels -LXC_LOGGING_LEVELS = { - 'INFO': ['info', 'INFO', 'Info'], - 'ERROR': ['error', 'ERROR', 'Error'], - 'DEBUG': ['debug', 'DEBUG', 'Debug'] -} - - -# LXC_ANSIBLE_STATES is a map of states that contain values of methods used -# when a particular state is evoked. -LXC_ANSIBLE_STATES = { - 'started': '_started', - 'stopped': '_stopped', - 'restarted': '_restarted', - 'absent': '_destroyed', - 'frozen': '_frozen', - 'clone': '_clone' -} - - -# This is used to attach to a running container and execute commands from -# within the container on the host. This will provide local access to a -# container without using SSH. The template will attempt to work within the -# home directory of the user that was attached to the container and source -# that users environment variables by default. -ATTACH_TEMPLATE = """#!/usr/bin/env bash -pushd "$(getent passwd $(whoami)|cut -f6 -d':')" - if [[ -f ".bashrc" ]];then - source .bashrc - unset HOSTNAME - fi -popd - -# User defined command -%(container_command)s -""" - - -def create_script(command): - """Write out a script onto a target. - - This method should be backward compatible with Python 2.4+ when executing - from within the container. - - :param command: command to run, this can be a script and can use spacing - with newlines as separation. - :type command: ``str`` - """ - - (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script') - f = os.fdopen(fd, 'wb') - try: - f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict')) - f.flush() - finally: - f.close() - - # Ensure the script is executable. - os.chmod(script_file, int('0700', 8)) - - # Output log file. - stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab') - - # Error log file. - stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab') - - # Execute the script command. - try: - subprocess.Popen( - [script_file], - stdout=stdout_file, - stderr=stderr_file - ).communicate() - finally: - # Close the log files. - stderr_file.close() - stdout_file.close() - - # Remove the script file upon completion of execution. - os.remove(script_file) - - -class LxcContainerManagement(object): - def __init__(self, module): - """Management of LXC containers via Ansible. - - :param module: Processed Ansible Module. - :type module: ``object`` - """ - self.module = module - self.state = self.module.params.get('state', None) - self.state_change = False - self.lxc_vg = None - self.lxc_path = self.module.params.get('lxc_path', None) - self.container_name = self.module.params['name'] - self.container = self.get_container_bind() - self.archive_info = None - self.clone_info = None - - def get_container_bind(self): - return lxc.Container(name=self.container_name) - - @staticmethod - def _roundup(num): - """Return a rounded floating point number. - - :param num: Number to round up. - :type: ``float`` - :returns: Rounded up number. 
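-            For example, ``_roundup(5.2)`` returns ``6`` while
-            ``_roundup(5.0)`` returns ``5``.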
-        :rtype: ``int``
-        """
-        num, part = str(num).split('.')
-        num = int(num)
-        if int(part) != 0:
-            num += 1
-        return num
-
-    @staticmethod
-    def _container_exists(container_name, lxc_path=None):
-        """Check if a container exists.
-
-        :param container_name: Name of the container.
-        :type: ``str``
-        :returns: True or False if the container is found.
-        :rtype: ``bool``
-        """
-        if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
-            return True
-        else:
-            return False
-
-    @staticmethod
-    def _add_variables(variables_dict, build_command):
-        """Return a command list with all found options.
-
-        :param variables_dict: Pre-parsed optional variables used from a
-                               seed command.
-        :type variables_dict: ``dict``
-        :param build_command: Command to run.
-        :type build_command: ``list``
-        :returns: list of command options.
-        :rtype: ``list``
-        """
-
-        for key, value in variables_dict.items():
-            build_command.append(str(key))
-            build_command.append(str(value))
-        return build_command
-
-    def _get_vars(self, variables):
-        """Return a dict of all variables as found within the module.
-
-        :param variables: Hash of all variables to find.
-        :type variables: ``dict``
-        """
-
-        # Remove incompatible storage backend options.
-        variables = variables.copy()
-        for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
-            variables.pop(v, None)
-
-        return_dict = dict()
-        false_values = BOOLEANS_FALSE.union([None, ''])
-        for k, v in variables.items():
-            _var = self.module.params.get(k)
-            if _var not in false_values:
-                return_dict[v] = _var
-        return return_dict
-
-    def _config(self):
-        """Configure an LXC container.
-
-        Write new configuration values to the lxc config file. This will
-        stop the container if it is running, write the new options, and then
-        restart the container upon completion.
-        """
-
-        _container_config = self.module.params.get('container_config')
-        if not _container_config:
-            return False
-
-        container_config_file = self.container.config_file_name
-        with open(container_config_file, 'rb') as f:
-            container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
-
-        parsed_options = [i.split('=', 1) for i in _container_config]
-        config_change = False
-        for key, value in parsed_options:
-            key = key.strip()
-            value = value.strip()
-            new_entry = '%s = %s\n' % (key, value)
-            keyre = re.compile(r'%s(\s+)?=' % key)
-            for option_line in container_config:
-                # Look for key in config
-                if keyre.match(option_line):
-                    dummy, _value = option_line.split('=', 1)
-                    config_value = ' '.join(_value.split())
-                    line_index = container_config.index(option_line)
-                    # If the sanitized values don't match replace them
-                    if value != config_value:
-                        line_index += 1
-                        if new_entry not in container_config:
-                            config_change = True
-                            container_config.insert(line_index, new_entry)
-                    # Break the flow as values are written or not at this point
-                    break
-            else:
-                config_change = True
-                container_config.append(new_entry)
-
-        # If the config changed restart the container.
-        if config_change:
-            container_state = self._get_state()
-            if container_state != 'stopped':
-                self.container.stop()
-
-            with open(container_config_file, 'wb') as f:
-                f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
-
-            self.state_change = True
-            if container_state == 'running':
-                self._container_startup()
-            elif container_state == 'frozen':
-                self._container_startup()
-                self.container.freeze()
-
-    def _container_create_clone(self):
-        """Clone a new LXC container from an existing container.
- - This method will clone an existing container to a new container using - the `clone_name` variable as the new container name. The method will - create a container if the container `name` does not exist. - - Note that cloning a container will ensure that the original container - is "stopped" before the clone can be done. Because this operation can - require a state change the method will return the original container - to its prior state upon completion of the clone. - - Once the clone is complete the new container will be left in a stopped - state. - """ - - # Ensure that the state of the original container is stopped - container_state = self._get_state() - if container_state != 'stopped': - self.state_change = True - self.container.stop() - - # lxc-clone is deprecated in favor of lxc-copy - clone_vars = 'variables-lxc-copy' - clone_cmd = self.module.get_bin_path('lxc-copy') - if not clone_cmd: - clone_vars = 'variables-lxc-clone' - clone_cmd = self.module.get_bin_path('lxc-clone', True) - - build_command = [ - clone_cmd, - ] - - build_command = self._add_variables( - variables_dict=self._get_vars( - variables=LXC_COMMAND_MAP['clone'][clone_vars] - ), - build_command=build_command - ) - - # Load logging for the instance when creating it. - if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE: - build_command.append('--snapshot') - # Check for backing_store == overlayfs if so force the use of snapshot - # If overlay fs is used and snapshot is unset the clone command will - # fail with an unsupported type. - elif self.module.params.get('backing_store') == 'overlayfs': - build_command.append('--snapshot') - - rc, return_data, err = self.module.run_command(build_command) - if rc != 0: - message = "Failed executing %s." % os.path.basename(clone_cmd) - self.failure( - err=err, rc=rc, msg=message, command=' '.join( - build_command - ) - ) - else: - self.state_change = True - # Restore the original state of the origin container if it was - # not in a stopped state. - if container_state == 'running': - self.container.start() - elif container_state == 'frozen': - self.container.start() - self.container.freeze() - - return True - - def _create(self): - """Create a new LXC container. - - This method will build and execute a shell command to build the - container. It would have been nice to simply use the lxc python library - however at the time this was written the python library, in both py2 - and py3 didn't support some of the more advanced container create - processes. These missing processes mainly revolve around backing - LXC containers with block devices. - """ - - build_command = [ - self.module.get_bin_path('lxc-create', True), - '--name', self.container_name, - '--quiet' - ] - - build_command = self._add_variables( - variables_dict=self._get_vars( - variables=LXC_COMMAND_MAP['create']['variables'] - ), - build_command=build_command - ) - - # Load logging for the instance when creating it. - if self.module.params.get('container_log') in BOOLEANS_TRUE: - # Set the logging path to the /var/log/lxc if uid is root. else - # set it to the home folder of the user executing. 
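-            # (If /var/log/lxc/ cannot be created, the OSError handler below
-            # falls back to the executing user's home directory as well.)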
-            try:
-                if os.getuid() != 0:
-                    log_path = os.getenv('HOME')
-                else:
-                    if not os.path.isdir('/var/log/lxc/'):
-                        os.makedirs('/var/log/lxc/')
-                    log_path = '/var/log/lxc/'
-            except OSError:
-                log_path = os.getenv('HOME')
-
-            build_command.extend([
-                '--logfile',
-                os.path.join(
-                    log_path, 'lxc-%s.log' % self.container_name
-                ),
-                '--logpriority',
-                self.module.params.get(
-                    'container_log_level'
-                ).upper()
-            ])
-
-        # Add the template commands to the end of the command if there are any
-        template_options = self.module.params.get('template_options', None)
-        if template_options:
-            build_command.append('--')
-            build_command += shlex.split(template_options)
-
-        rc, return_data, err = self.module.run_command(build_command)
-        if rc != 0:
-            message = "Failed executing lxc-create."
-            self.failure(
-                err=err, rc=rc, msg=message, command=' '.join(build_command)
-            )
-        else:
-            self.state_change = True
-
-    def _container_data(self):
-        """Returns a dict of container information.
-
-        :returns: container data
-        :rtype: ``dict``
-        """
-
-        return {
-            'interfaces': self.container.get_interfaces(),
-            'ips': self.container.get_ips(),
-            'state': self._get_state(),
-            'init_pid': int(self.container.init_pid),
-            'name': self.container_name,
-        }
-
-    def _unfreeze(self):
-        """Unfreeze a container.
-
-        :returns: True or False based on if the container was unfrozen.
-        :rtype: ``bool``
-        """
-
-        unfreeze = self.container.unfreeze()
-        if unfreeze:
-            self.state_change = True
-        return unfreeze
-
-    def _get_state(self):
-        """Return the state of a container.
-
-        If the container is not found the state returned is "absent"
-
-        :returns: state of a container as a lower case string.
-        :rtype: ``str``
-        """
-
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            return str(self.container.state).lower()
-        return str('absent')
-
-    def _execute_command(self):
-        """Execute a shell command."""
-
-        container_command = self.module.params.get('container_command')
-        if container_command:
-            container_state = self._get_state()
-            if container_state == 'frozen':
-                self._unfreeze()
-            elif container_state == 'stopped':
-                self._container_startup()
-
-            self.container.attach_wait(create_script, container_command)
-            self.state_change = True
-
-    def _container_startup(self, timeout=60):
-        """Ensure a container is started.
-
-        :param timeout: Time before the start operation is abandoned.
-        :type timeout: ``int``
-        """
-
-        self.container = self.get_container_bind()
-        for dummy in xrange(timeout):
-            if self._get_state() != 'running':
-                self.container.start()
-                self.state_change = True
-                # post startup sleep for 1 second.
-                time.sleep(1)
-            else:
-                return True
-        self.failure(
-            lxc_container=self._container_data(),
-            error='Failed to start container'
-                  ' [ %s ]' % self.container_name,
-            rc=1,
-            msg='The container [ %s ] failed to start. Check that lxc is'
-                ' available and that the container is in a functional'
-                ' state.' % self.container_name
-        )
-
-    def _check_archive(self):
-        """Create a compressed archive of a container.
-
-        This will store the archive information as self.archive_info.
-        """
-
-        if self.module.params.get('archive') in BOOLEANS_TRUE:
-            self.archive_info = {
-                'archive': self._container_create_tar()
-            }
-
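-    # Illustrative note (not in the original source): when ``archive=true``
-    # the tarball path returned by ``_container_create_tar`` is merged into
-    # the module result by ``run()``, so a play that registered the result as
-    # ``lvm_container_info`` can read it at
-    # ``lvm_container_info.lxc_container.archive``.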
-    def _check_clone(self):
-        """Clone the container when clone_name is provided.
-
-        This will store the clone information as self.clone_info.
-        """
-
-        clone_name = self.module.params.get('clone_name')
-        if clone_name:
-            if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
-                self.clone_info = {
-                    'cloned': self._container_create_clone()
-                }
-            else:
-                self.clone_info = {
-                    'cloned': False
-                }
-
-    def _destroyed(self, timeout=60):
-        """Ensure a container is destroyed.
-
-        :param timeout: Time before the destroy operation is abandoned.
-        :type timeout: ``int``
-        """
-
-        for dummy in xrange(timeout):
-            if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-                break
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-
-            if self._get_state() != 'stopped':
-                self.state_change = True
-                self.container.stop()
-
-            if self.container.destroy():
-                self.state_change = True
-
-            # post destroy attempt sleep for 1 second.
-            time.sleep(1)
-        else:
-            self.failure(
-                lxc_container=self._container_data(),
-                error='Failed to destroy container'
-                      ' [ %s ]' % self.container_name,
-                rc=1,
-                msg='The container [ %s ] failed to be destroyed. Check'
-                    ' that lxc is available and that the container is in a'
-                    ' functional state.' % self.container_name
-            )
-
-    def _frozen(self, count=0):
-        """Ensure a container is frozen.
-
-        If the container does not exist the container will be created.
-
-        :param count: number of times this command has been called by itself.
-        :type count: ``int``
-        """
-
-        self.check_count(count=count, method='frozen')
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            self._execute_command()
-
-            # Perform any configuration updates
-            self._config()
-
-            container_state = self._get_state()
-            if container_state == 'frozen':
-                pass
-            elif container_state == 'running':
-                self.container.freeze()
-                self.state_change = True
-            else:
-                self._container_startup()
-                self.container.freeze()
-                self.state_change = True
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-        else:
-            self._create()
-            count += 1
-            self._frozen(count)
-
-    def _restarted(self, count=0):
-        """Ensure a container is restarted.
-
-        If the container does not exist the container will be created.
-
-        :param count: number of times this command has been called by itself.
-        :type count: ``int``
-        """
-
-        self.check_count(count=count, method='restart')
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            self._execute_command()
-
-            # Perform any configuration updates
-            self._config()
-
-            if self._get_state() != 'stopped':
-                self.container.stop()
-                self.state_change = True
-
-            # Run container startup
-            self._container_startup()
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-        else:
-            self._create()
-            count += 1
-            self._restarted(count)
-
-    def _stopped(self, count=0):
-        """Ensure a container is stopped.
-
-        If the container does not exist the container will be created.
-
-        :param count: number of times this command has been called by itself.
-        :type count: ``int``
-        """
-
-        self.check_count(count=count, method='stop')
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            self._execute_command()
-
-            # Perform any configuration updates
-            self._config()
-
-            if self._get_state() != 'stopped':
-                self.container.stop()
-                self.state_change = True
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-        else:
-            self._create()
-            count += 1
-            self._stopped(count)
-
-    def _started(self, count=0):
-        """Ensure a container is started.
-
-        If the container does not exist the container will be created.
-
-        :param count: number of times this command has been called by itself.
-        :type count: ``int``
-        """
-
-        self.check_count(count=count, method='start')
-        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
-            container_state = self._get_state()
-            if container_state == 'running':
-                pass
-            elif container_state == 'frozen':
-                self._unfreeze()
-            elif not self._container_startup():
-                self.failure(
-                    lxc_container=self._container_data(),
-                    error='Failed to start container'
-                          ' [ %s ]' % self.container_name,
-                    rc=1,
-                    msg='The container [ %s ] failed to start. Check that lxc is'
-                        ' available and that the container is in a functional'
-                        ' state.' % self.container_name
-                )
-
-            # Return data
-            self._execute_command()
-
-            # Perform any configuration updates
-            self._config()
-
-            # Check if the container needs to have an archive created.
-            self._check_archive()
-
-            # Check if the container is to be cloned
-            self._check_clone()
-        else:
-            self._create()
-            count += 1
-            self._started(count)
-
-    def _get_lxc_vg(self):
-        """Return the name of the Volume Group used in LXC."""
-
-        build_command = [
-            self.module.get_bin_path('lxc-config', True),
-            "lxc.bdev.lvm.vg"
-        ]
-        rc, vg, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='Failed to read LVM VG from LXC config',
-                command=' '.join(build_command)
-            )
-        else:
-            return str(vg.strip())
-
-    def _lvm_lv_list(self):
-        """Return a list of all LVs in the current VG."""
-
-        vg = self._get_lxc_vg()
-        build_command = [
-            self.module.get_bin_path('lvs', True)
-        ]
-        rc, stdout, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='Failed to get list of LVs',
-                command=' '.join(build_command)
-            )
-
-        all_lvms = [i.split() for i in stdout.splitlines()][1:]
-        return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
-
-    def _get_vg_free_pe(self, vg_name):
-        """Return the available size of a given VG.
-
-        :param vg_name: Name of the volume group.
-        :type vg_name: ``str``
-        :returns: free size and unit of measure of the VG
-        :type: ``tuple``
-        """
-
-        build_command = [
-            'vgdisplay',
-            vg_name,
-            '--units',
-            'g'
-        ]
-        rc, stdout, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to read vg %s' % vg_name,
-                command=' '.join(build_command)
-            )
-
-        vg_info = [i.strip() for i in stdout.splitlines()][1:]
-        free_pe = [i for i in vg_info if i.startswith('Free')]
-        _free_pe = free_pe[0].split()
-        return float(_free_pe[-2]), _free_pe[-1]
-
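-    # Illustrative parsing note (assumed output shape): ``vgdisplay <vg>
-    # --units g`` prints a line such as
-    #   Free  PE / Size       4480 / 17.50 GiB
-    # from which ``_get_vg_free_pe`` above returns ``(17.5, 'GiB')``.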
-    def _get_lv_size(self, lv_name):
-        """Return the available size of a given LV.
-
-        :param lv_name: Name of the logical volume.
-        :type lv_name: ``str``
-        :returns: size and measurement of an LV
-        :type: ``tuple``
-        """
-
-        vg = self._get_lxc_vg()
-        lv = os.path.join(vg, lv_name)
-        build_command = [
-            'lvdisplay',
-            lv,
-            '--units',
-            'g'
-        ]
-        rc, stdout, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to read lv %s' % lv,
-                command=' '.join(build_command)
-            )
-
-        lv_info = [i.strip() for i in stdout.splitlines()][1:]
-        _free_pe = [i for i in lv_info if i.startswith('LV Size')]
-        free_pe = _free_pe[0].split()
-        return self._roundup(float(free_pe[-2])), free_pe[-1]
-
-    def _lvm_snapshot_create(self, source_lv, snapshot_name,
-                             snapshot_size_gb=5):
-        """Create an LVM snapshot.
-
-        :param source_lv: Name of lv to snapshot
-        :type source_lv: ``str``
-        :param snapshot_name: Name of lv snapshot
-        :type snapshot_name: ``str``
-        :param snapshot_size_gb: Size of snapshot to create
-        :type snapshot_size_gb: ``int``
-        """
-
-        vg = self._get_lxc_vg()
-        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
-
-        if free_space < float(snapshot_size_gb):
-            message = (
-                'Snapshot size [ %s ] is greater than the available space'
-                ' [ %s ] on volume group [ %s ]' % (snapshot_size_gb, free_space, vg)
-            )
-            self.failure(
-                error='Not enough space to create snapshot',
-                rc=2,
-                msg=message
-            )
-
-        # Create LVM Snapshot
-        build_command = [
-            self.module.get_bin_path('lvcreate', True),
-            "-n",
-            snapshot_name,
-            "-s",
-            os.path.join(vg, source_lv),
-            "-L%sg" % snapshot_size_gb
-        ]
-        rc, stdout, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='Failed to create LVM snapshot %s/%s --> %s'
-                    % (vg, source_lv, snapshot_name)
-            )
-
-    def _lvm_lv_mount(self, lv_name, mount_point):
-        """Mount an LV.
-
-        :param lv_name: name of the logical volume to mount
-        :type lv_name: ``str``
-        :param mount_point: path on the file system that is mounted.
-        :type mount_point: ``str``
-        """
-
-        vg = self._get_lxc_vg()
-
-        build_command = [
-            self.module.get_bin_path('mount', True),
-            "/dev/%s/%s" % (vg, lv_name),
-            mount_point,
-        ]
-        rc, stdout, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to mount LVM LV %s/%s to %s'
-                    % (vg, lv_name, mount_point)
-            )
-
-    def _create_tar(self, source_dir):
-        """Create an archive of a given ``source_dir``.
-
-        :param source_dir: Path to the directory to be archived.
-        :type source_dir: ``str``
-        """
-
-        old_umask = os.umask(int('0077', 8))
-
-        archive_path = self.module.params.get('archive_path')
-        if not os.path.isdir(archive_path):
-            os.makedirs(archive_path)
-
-        archive_compression = self.module.params.get('archive_compression')
-        compression_type = LXC_COMPRESSION_MAP[archive_compression]
-
-        # Build the archive name from the archive path, the container name
-        # and the compression type's extension.
-        archive_name = '%s.%s' % (
-            os.path.join(
-                archive_path,
-                self.container_name
-            ),
-            compression_type['extension']
-        )
-
-        build_command = [
-            self.module.get_bin_path('tar', True),
-            '--directory=%s' % os.path.realpath(
-                os.path.expanduser(source_dir)
-            ),
-            compression_type['argument'],
-            archive_name,
-            '.'
-        ]
-
-        rc, stdout, err = self.module.run_command(
-            build_command
-        )
-
-        os.umask(old_umask)
-
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to create tar archive',
-                command=' '.join(build_command)
-            )
-
-        return archive_name
-
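-    # Illustrative (not from the original source): with
-    # ``archive_path=/opt/archives``, ``archive_compression=gzip`` and a
-    # container named ``web01``, ``_create_tar`` produces
-    # ``/opt/archives/web01.tar.tgz`` by running roughly:
-    #   tar --directory=<source dir> -czf /opt/archives/web01.tar.tgz .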
-    def _lvm_lv_remove(self, lv_name):
-        """Remove an LV.
-
-        :param lv_name: The name of the logical volume
-        :type lv_name: ``str``
-        """
-
-        vg = self._get_lxc_vg()
-        build_command = [
-            self.module.get_bin_path('lvremove', True),
-            "-f",
-            "%s/%s" % (vg, lv_name),
-        ]
-        rc, stdout, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
-                command=' '.join(build_command)
-            )
-
-    def _rsync_data(self, container_path, temp_dir):
-        """Sync the container directory to the temp directory.
-
-        :param container_path: path to the container root filesystem
-        :type container_path: ``str``
-        :param temp_dir: path to the temporary local working directory
-        :type temp_dir: ``str``
-        """
-        # This loop is created to support overlayfs archives. This should
-        # squash all of the layers into a single archive.
-        fs_paths = container_path.split(':')
-        if 'overlayfs' in fs_paths:
-            fs_paths.pop(fs_paths.index('overlayfs'))
-
-        for fs_path in fs_paths:
-            # Set the path to the container data
-            fs_path = os.path.dirname(fs_path)
-
-            # Run the sync command
-            build_command = [
-                self.module.get_bin_path('rsync', True),
-                '-aHAX',
-                fs_path,
-                temp_dir,
-            ]
-            rc, stdout, err = self.module.run_command(
-                build_command,
-            )
-            if rc != 0:
-                self.failure(
-                    err=err,
-                    rc=rc,
-                    msg='failed to perform archive',
-                    command=' '.join(build_command)
-                )
-
-    def _unmount(self, mount_point):
-        """Unmount a file system.
-
-        :param mount_point: path on the file system that is mounted.
-        :type mount_point: ``str``
-        """
-
-        build_command = [
-            self.module.get_bin_path('umount', True),
-            mount_point,
-        ]
-        rc, stdout, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to unmount [ %s ]' % mount_point,
-                command=' '.join(build_command)
-            )
-
-    def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
-        """Mount an overlayfs.
-
-        :param lowerdir: name/path of the lower directory
-        :type lowerdir: ``str``
-        :param upperdir: name/path of the upper directory
-        :type upperdir: ``str``
-        :param mount_point: path on the file system that is mounted.
-        :type mount_point: ``str``
-        """
-
-        build_command = [
-            self.module.get_bin_path('mount', True),
-            '-t', 'overlayfs',
-            '-o', 'lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
-            'overlayfs',
-            mount_point,
-        ]
-        rc, stdout, err = self.module.run_command(build_command)
-        if rc != 0:
-            self.failure(
-                err=err,
-                rc=rc,
-                msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
-                    % (lowerdir, upperdir, mount_point, build_command)
-            )
-
-    def _container_create_tar(self):
-        """Create a tar archive from an LXC container.
- - The process is as follows: - * Stop or Freeze the container - * Create temporary dir - * Copy container and config to temporary directory - * If LVM backed: - * Create LVM snapshot of LV backing the container - * Mount the snapshot to tmpdir/rootfs - * Restore the state of the container - * Create tar of tmpdir - * Clean up - """ - - # Create a temp dir - temp_dir = tempfile.mkdtemp() - - # Set the name of the working dir, temp + container_name - work_dir = os.path.join(temp_dir, self.container_name) - - # LXC container rootfs - lxc_rootfs = self.container.get_config_item('lxc.rootfs') - - # Test if the containers rootfs is a block device - block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev')) - - # Test if the container is using overlayfs - overlayfs_backed = lxc_rootfs.startswith('overlayfs') - - mount_point = os.path.join(work_dir, 'rootfs') - - # Set the snapshot name if needed - snapshot_name = '%s_lxc_snapshot' % self.container_name - - container_state = self._get_state() - try: - # Ensure the original container is stopped or frozen - if container_state not in ['stopped', 'frozen']: - if container_state == 'running': - self.container.freeze() - else: - self.container.stop() - - # Sync the container data from the container_path to work_dir - self._rsync_data(lxc_rootfs, temp_dir) - - if block_backed: - if snapshot_name not in self._lvm_lv_list(): - if not os.path.exists(mount_point): - os.makedirs(mount_point) - - # Take snapshot - size, measurement = self._get_lv_size( - lv_name=self.container_name - ) - self._lvm_snapshot_create( - source_lv=self.container_name, - snapshot_name=snapshot_name, - snapshot_size_gb=size - ) - - # Mount snapshot - self._lvm_lv_mount( - lv_name=snapshot_name, - mount_point=mount_point - ) - else: - self.failure( - err='snapshot [ %s ] already exists' % snapshot_name, - rc=1, - msg='The snapshot [ %s ] already exists. Please clean' - ' up old snapshot of containers before continuing.' - % snapshot_name - ) - elif overlayfs_backed: - lowerdir, upperdir = lxc_rootfs.split(':')[1:] - self._overlayfs_mount( - lowerdir=lowerdir, - upperdir=upperdir, - mount_point=mount_point - ) - - # Set the state as changed and set a new fact - self.state_change = True - return self._create_tar(source_dir=work_dir) - finally: - if block_backed or overlayfs_backed: - # unmount snapshot - self._unmount(mount_point) - - if block_backed: - # Remove snapshot - self._lvm_lv_remove(snapshot_name) - - # Restore original state of container - if container_state == 'running': - if self._get_state() == 'frozen': - self.container.unfreeze() - else: - self.container.start() - - # Remove tmpdir - shutil.rmtree(temp_dir) - - def check_count(self, count, method): - if count > 1: - self.failure( - error='Failed to %s container' % method, - rc=1, - msg='The container [ %s ] failed to %s. Check to lxc is' - ' available and that the container is in a functional' - ' state.' % (self.container_name, method) - ) - - def failure(self, **kwargs): - """Return a Failure when running an Ansible command. - - :param error: ``str`` Error that occurred. - :param rc: ``int`` Return code while executing an Ansible command. - :param msg: ``str`` Message to report. 
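-        :param kwargs: Any additional key/value pairs are passed straight
-            through to ``fail_json``.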
- """ - - self.module.fail_json(**kwargs) - - def run(self): - """Run the main method.""" - - action = getattr(self, LXC_ANSIBLE_STATES[self.state]) - action() - - outcome = self._container_data() - if self.archive_info: - outcome.update(self.archive_info) - - if self.clone_info: - outcome.update(self.clone_info) - - self.module.exit_json( - changed=self.state_change, - lxc_container=outcome - ) - - -def main(): - """Ansible Main module.""" - - module = AnsibleModule( - argument_spec=dict( - name=dict( - type='str', - required=True - ), - template=dict( - type='str', - default='ubuntu' - ), - backing_store=dict( - type='str', - choices=list(LXC_BACKING_STORE.keys()), - default='dir' - ), - template_options=dict( - type='str' - ), - config=dict( - type='path', - ), - vg_name=dict( - type='str', - default='lxc' - ), - thinpool=dict( - type='str' - ), - fs_type=dict( - type='str', - default='ext4' - ), - fs_size=dict( - type='str', - default='5G' - ), - directory=dict( - type='path' - ), - zfs_root=dict( - type='str' - ), - lv_name=dict( - type='str' - ), - lxc_path=dict( - type='path' - ), - state=dict( - choices=list(LXC_ANSIBLE_STATES.keys()), - default='started' - ), - container_command=dict( - type='str' - ), - container_config=dict( - type='list', - elements='str' - ), - container_log=dict( - type='bool', - default=False - ), - container_log_level=dict( - choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], - default='INFO' - ), - clone_name=dict( - type='str', - required=False - ), - clone_snapshot=dict( - type='bool', - default='false' - ), - archive=dict( - type='bool', - default=False - ), - archive_path=dict( - type='path', - ), - archive_compression=dict( - choices=list(LXC_COMPRESSION_MAP.keys()), - default='gzip' - ) - ), - supports_check_mode=False, - required_if=([ - ('archive', True, ['archive_path']) - ]), - ) - - if not HAS_LXC: - module.fail_json( - msg='The `lxc` module is not importable. Check the requirements.' - ) - - lv_name = module.params.get('lv_name') - if not lv_name: - module.params['lv_name'] = module.params.get('name') - - lxc_manage = LxcContainerManagement(module=module) - lxc_manage.run() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py b/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py deleted file mode 100644 index bd232668..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py +++ /dev/null @@ -1,804 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Hiroaki Nakamura -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lxd_container -short_description: Manage LXD instances -description: - - Management of LXD containers and virtual machines. -author: "Hiroaki Nakamura (@hnakamur)" -options: - name: - description: - - Name of an instance. - type: str - required: true - architecture: - description: - - 'The architecture for the instance (for example C(x86_64) or C(i686)). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' - type: str - required: false - config: - description: - - 'The config for the instance (for example C({"limits.cpu": "2"})). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' 
-      - If the instance already exists and its "config" values in metadata
-        obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines)
-        are different, this module tries to apply the configurations.
-      - The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
-    type: dict
-    required: false
-  ignore_volatile_options:
-    description:
-      - If set to C(true), options starting with C(volatile.) are ignored. As a result,
-        they are reapplied for each execution.
-      - This default behavior can be changed by setting this option to C(false).
-      - The current default value C(true) is deprecated since community.general 4.0.0,
-        and will change to C(false) in community.general 6.0.0.
-    type: bool
-    required: false
-    version_added: 3.7.0
-  profiles:
-    description:
-      - Profile to be used by the instance.
-    type: list
-    elements: str
-  devices:
-    description:
-      - 'The devices for the instance
-        (for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
-    type: dict
-    required: false
-  ephemeral:
-    description:
-      - Whether or not the instance is ephemeral (for example C(true) or C(false)).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
-    required: false
-    type: bool
-  source:
-    description:
-      - 'The source for the instance
-        (e.g. { "type": "image",
-        "mode": "pull",
-        "server": "https://images.linuxcontainers.org",
-        "protocol": "lxd",
-        "alias": "ubuntu/xenial/amd64" }).'
-      - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
-      - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).'
-    required: false
-    type: dict
-  state:
-    choices:
-      - started
-      - stopped
-      - restarted
-      - absent
-      - frozen
-    description:
-      - Define the state of an instance.
-    required: false
-    default: started
-    type: str
-  target:
-    description:
-      - For cluster deployments. Will attempt to create an instance on a target node.
-        If the instance exists elsewhere in a cluster, then it will not be replaced or moved.
-        The name should correspond to the name of the node you see in C(lxc cluster list).
-    type: str
-    required: false
-    version_added: 1.0.0
-  timeout:
-    description:
-      - A timeout for changing the state of the instance.
-      - This is also used as a timeout for waiting until IPv4 addresses
-        are assigned to all network interfaces in the instance after
-        starting or restarting.
-    required: false
-    default: 30
-    type: int
-  type:
-    description:
-      - Instance type can be either C(virtual-machine) or C(container).
-    required: false
-    default: container
-    choices:
-      - container
-      - virtual-machine
-    type: str
-    version_added: 4.1.0
-  wait_for_ipv4_addresses:
-    description:
-      - If this is true, the C(lxd_container) waits until IPv4 addresses
-        are assigned to all network interfaces in the instance after
-        starting or restarting.
-    required: false
-    default: false
-    type: bool
-  wait_for_container:
-    description:
-      - If set to C(true), the task will wait until the operation reports a
-        success status when performing container operations.
-    default: false
-    type: bool
-    version_added: 4.4.0
-  force_stop:
-    description:
-      - If this is true, the C(lxd_container) forcefully stops the instance
-        when stopping or restarting it.
-    required: false
-    default: false
-    type: bool
-  url:
-    description:
-      - The unix domain socket path or the https URL for the LXD server.
-    required: false
-    default: unix:/var/lib/lxd/unix.socket
-    type: str
-  snap_url:
-    description:
-      - The unix domain socket path when LXD is installed by snap package manager.
-    required: false
-    default: unix:/var/snap/lxd/common/lxd/unix.socket
-    type: str
-  client_key:
-    description:
-      - The client certificate key file path.
-      - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
-    required: false
-    aliases: [ key_file ]
-    type: path
-  client_cert:
-    description:
-      - The client certificate file path.
-      - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
-    required: false
-    aliases: [ cert_file ]
-    type: path
-  trust_password:
-    description:
-      - The client trusted password.
-      - 'You need to set this password on the LXD server before
-        running this module using the following command:
-        C(lxc config set core.trust_password <some random password>).
-        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
-      - If trust_password is set, this module sends a request for
-        authentication before sending any requests.
-    required: false
-    type: str
-notes:
-  - Instances can be containers or virtual machines, and each must have a unique name. If you attempt to create an instance
-    with a name that already exists in the user's namespace the module will
-    simply return as "unchanged".
-  - There are two ways to run commands inside a container or virtual machine, using the command
-    module or using the ansible lxd connection plugin bundled in Ansible >=
-    2.1, the latter requires Python to be installed in the instance which can
-    be done with the command module.
-  - You can copy a file from the host to the instance
-    with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the `lxd` connection plugin.
-    See the example below.
-  - You can copy a file in the created instance to the localhost
-    with `command=lxc file pull instance_name/dir/filename filename`.
-    See the first example below.
-'''
-
-EXAMPLES = '''
-# An example for creating an Ubuntu container and installing Python
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Create a started container
-      community.general.lxd_container:
-        name: mycontainer
-        ignore_volatile_options: true
-        state: started
-        source:
-          type: image
-          mode: pull
-          server: https://images.linuxcontainers.org
-          protocol: lxd # if you get a 404, try setting protocol: simplestreams
-          alias: ubuntu/xenial/amd64
-        profiles: ["default"]
-        wait_for_ipv4_addresses: true
-        timeout: 600
-
-    - name: Check python is installed in container
-      delegate_to: mycontainer
-      ansible.builtin.raw: dpkg -s python
-      register: python_install_check
-      failed_when: python_install_check.rc not in [0, 1]
-      changed_when: false
-
-    - name: Install python in container
-      delegate_to: mycontainer
-      ansible.builtin.raw: apt-get install -y python
-      when: python_install_check.rc == 1
-
-# An example for creating an Ubuntu 14.04 container using an image fingerprint.
-# This requires changing 'server' and 'protocol' key values, replacing the
-# 'alias' key with 'fingerprint' and supplying an appropriate value that
-# matches the container image you wish to use.
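-# (You can list the available image fingerprints for a remote with
-# 'lxc image list <remote>:'.)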
-- hosts: localhost - connection: local - tasks: - - name: Create a started container - community.general.lxd_container: - name: mycontainer - ignore_volatile_options: true - state: started - source: - type: image - mode: pull - # Provides current (and older) Ubuntu images with listed fingerprints - server: https://cloud-images.ubuntu.com/releases - # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list') - protocol: simplestreams - # This provides an Ubuntu 14.04 LTS amd64 image from 20150814. - fingerprint: e9a8bdfab6dc - profiles: ["default"] - wait_for_ipv4_addresses: true - timeout: 600 - -# An example for deleting a container -- hosts: localhost - connection: local - tasks: - - name: Delete a container - community.general.lxd_container: - name: mycontainer - state: absent - type: container - -# An example for restarting a container -- hosts: localhost - connection: local - tasks: - - name: Restart a container - community.general.lxd_container: - name: mycontainer - state: restarted - type: container - -# An example for restarting a container using https to connect to the LXD server -- hosts: localhost - connection: local - tasks: - - name: Restart a container - community.general.lxd_container: - url: https://127.0.0.1:8443 - # These client_cert and client_key values are equal to the default values. - #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" - #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" - trust_password: mypassword - name: mycontainer - state: restarted - -# Note your container must be in the inventory for the below example. -# -# [containers] -# mycontainer ansible_connection=lxd -# -- hosts: - - mycontainer - tasks: - - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts" - ansible.builtin.fetch: - src: /etc/hosts - dest: /tmp/mycontainer-hosts - flat: true - -# An example for LXD cluster deployments. This example will create two new container on specific -# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster -# members that LXD cluster recognizes, not ansible inventory names, see: 'lxc cluster list'. -# LXD API calls can be made to any LXD member, in this example, we send API requests to -#'node01.example.com', which matches ansible inventory name. -- hosts: node01.example.com - tasks: - - name: Create LXD container - community.general.lxd_container: - name: new-container-1 - ignore_volatile_options: true - state: started - source: - type: image - mode: pull - alias: ubuntu/xenial/amd64 - target: node01 - - - name: Create container on another node - community.general.lxd_container: - name: new-container-2 - ignore_volatile_options: true - state: started - source: - type: image - mode: pull - alias: ubuntu/xenial/amd64 - target: node02 - -# An example for creating a virtual machine -- hosts: localhost - connection: local - tasks: - - name: Create container on another node - community.general.lxd_container: - name: new-vm-1 - type: virtual-machine - state: started - ignore_volatile_options: true - wait_for_ipv4_addresses: true - profiles: ["default"] - source: - protocol: simplestreams - type: image - mode: pull - server: https://images.linuxcontainers.org - alias: debian/11 - timeout: 600 -''' - -RETURN = ''' -addresses: - description: Mapping from the network device name to a list of IPv4 addresses in the instance. 
- returned: when state is started or restarted - type: dict - sample: {"eth0": ["10.155.92.191"]} -old_state: - description: The old state of the instance. - returned: when state is started or restarted - type: str - sample: "stopped" -logs: - description: The logs of requests and responses. - returned: when ansible-playbook is invoked with -vvvv. - type: list - sample: "(too long to be placed here)" -actions: - description: List of actions performed for the instance. - returned: success - type: list - sample: '["create", "start"]' -''' -import datetime -import os -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException -from ansible.module_utils.six.moves.urllib.parse import urlencode - -# LXD_ANSIBLE_STATES is a map of states that contain values of methods used -# when a particular state is evoked. -LXD_ANSIBLE_STATES = { - 'started': '_started', - 'stopped': '_stopped', - 'restarted': '_restarted', - 'absent': '_destroyed', - 'frozen': '_frozen' -} - -# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible -# lxc_container module state parameter value. -ANSIBLE_LXD_STATES = { - 'Running': 'started', - 'Stopped': 'stopped', - 'Frozen': 'frozen', -} - -# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint -ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' - -# CONFIG_PARAMS is a list of config attribute names. -CONFIG_PARAMS = [ - 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source' -] - - -class LXDContainerManagement(object): - def __init__(self, module): - """Management of LXC containers via Ansible. - - :param module: Processed Ansible Module. - :type module: ``object`` - """ - self.module = module - self.name = self.module.params['name'] - self._build_config() - - self.state = self.module.params['state'] - - self.timeout = self.module.params['timeout'] - self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses'] - self.force_stop = self.module.params['force_stop'] - self.addresses = None - self.target = self.module.params['target'] - self.wait_for_container = self.module.params['wait_for_container'] - - self.type = self.module.params['type'] - - # LXD Rest API provides additional endpoints for creating containers and virtual-machines. 
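-        # (Note: newer LXD releases also expose a unified '/1.0/instances'
-        # endpoint; this module keeps to the type-specific endpoints below.)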
-        self.api_endpoint = None
-        if self.type == 'container':
-            self.api_endpoint = '/1.0/containers'
-        elif self.type == 'virtual-machine':
-            self.api_endpoint = '/1.0/virtual-machines'
-
-        self.key_file = self.module.params.get('client_key')
-        if self.key_file is None:
-            self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
-        self.cert_file = self.module.params.get('client_cert')
-        if self.cert_file is None:
-            self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
-        self.debug = self.module._verbosity >= 4
-
-        try:
-            if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
-                self.url = self.module.params['url']
-            elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
-                self.url = self.module.params['snap_url']
-            else:
-                self.url = self.module.params['url']
-        except Exception as e:
-            self.module.fail_json(msg=e.msg)
-
-        try:
-            self.client = LXDClient(
-                self.url, key_file=self.key_file, cert_file=self.cert_file,
-                debug=self.debug
-            )
-        except LXDClientException as e:
-            self.module.fail_json(msg=e.msg)
-        self.trust_password = self.module.params.get('trust_password', None)
-        self.actions = []
-
-    def _build_config(self):
-        self.config = {}
-        for attr in CONFIG_PARAMS:
-            param_val = self.module.params.get(attr, None)
-            if param_val is not None:
-                self.config[attr] = param_val
-
-    def _get_instance_json(self):
-        return self.client.do(
-            'GET', '{0}/{1}'.format(self.api_endpoint, self.name),
-            ok_error_codes=[404]
-        )
-
-    def _get_instance_state_json(self):
-        return self.client.do(
-            'GET', '{0}/{1}/state'.format(self.api_endpoint, self.name),
-            ok_error_codes=[404]
-        )
-
-    @staticmethod
-    def _instance_json_to_module_state(resp_json):
-        if resp_json['type'] == 'error':
-            return 'absent'
-        return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
-
-    def _change_state(self, action, force_stop=False):
-        body_json = {'action': action, 'timeout': self.timeout}
-        if force_stop:
-            body_json['force'] = True
-        return self.client.do('PUT', '{0}/{1}/state'.format(self.api_endpoint, self.name), body_json=body_json)
-
-    def _create_instance(self):
-        config = self.config.copy()
-        config['name'] = self.name
-        if self.target:
-            self.client.do('POST', '{0}?{1}'.format(self.api_endpoint, urlencode(dict(target=self.target))), config, wait_for_container=self.wait_for_container)
-        else:
-            self.client.do('POST', self.api_endpoint, config, wait_for_container=self.wait_for_container)
-        self.actions.append('create')
-
-    def _start_instance(self):
-        self._change_state('start')
-        self.actions.append('start')
-
-    def _stop_instance(self):
-        self._change_state('stop', self.force_stop)
-        self.actions.append('stop')
-
-    def _restart_instance(self):
-        self._change_state('restart', self.force_stop)
-        self.actions.append('restart')
-
-    def _delete_instance(self):
-        self.client.do('DELETE', '{0}/{1}'.format(self.api_endpoint, self.name))
-        self.actions.append('delete')
-
-    def _freeze_instance(self):
-        self._change_state('freeze')
-        self.actions.append('freeze')
-
-    def _unfreeze_instance(self):
-        self._change_state('unfreeze')
-        self.actions.append('unfreeze')
-
-    def _instance_ipv4_addresses(self, ignore_devices=None):
-        ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
-
-        resp_json = self._get_instance_state_json()
-        network = resp_json['metadata']['network'] or {}
-        network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
-        addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in
network.items()) or {} - return addresses - - @staticmethod - def _has_all_ipv4_addresses(addresses): - return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values()) - - def _get_addresses(self): - try: - due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout) - while datetime.datetime.now() < due: - time.sleep(1) - addresses = self._instance_ipv4_addresses() - if self._has_all_ipv4_addresses(addresses): - self.addresses = addresses - return - except LXDClientException as e: - e.msg = 'timeout for getting IPv4 addresses' - raise - - def _started(self): - if self.old_state == 'absent': - self._create_instance() - self._start_instance() - else: - if self.old_state == 'frozen': - self._unfreeze_instance() - elif self.old_state == 'stopped': - self._start_instance() - if self._needs_to_apply_instance_configs(): - self._apply_instance_configs() - if self.wait_for_ipv4_addresses: - self._get_addresses() - - def _stopped(self): - if self.old_state == 'absent': - self._create_instance() - else: - if self.old_state == 'stopped': - if self._needs_to_apply_instance_configs(): - self._start_instance() - self._apply_instance_configs() - self._stop_instance() - else: - if self.old_state == 'frozen': - self._unfreeze_instance() - if self._needs_to_apply_instance_configs(): - self._apply_instance_configs() - self._stop_instance() - - def _restarted(self): - if self.old_state == 'absent': - self._create_instance() - self._start_instance() - else: - if self.old_state == 'frozen': - self._unfreeze_instance() - if self._needs_to_apply_instance_configs(): - self._apply_instance_configs() - self._restart_instance() - if self.wait_for_ipv4_addresses: - self._get_addresses() - - def _destroyed(self): - if self.old_state != 'absent': - if self.old_state == 'frozen': - self._unfreeze_instance() - if self.old_state != 'stopped': - self._stop_instance() - self._delete_instance() - - def _frozen(self): - if self.old_state == 'absent': - self._create_instance() - self._start_instance() - self._freeze_instance() - else: - if self.old_state == 'stopped': - self._start_instance() - if self._needs_to_apply_instance_configs(): - self._apply_instance_configs() - self._freeze_instance() - - def _needs_to_change_instance_config(self, key): - if key not in self.config: - return False - if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile" - old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items() if not k.startswith('volatile.')) - for k, v in self.config['config'].items(): - if k not in old_configs: - return True - if old_configs[k] != v: - return True - return False - elif key == 'config': # next default behavior - old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items()) - for k, v in self.config['config'].items(): - if k not in old_configs: - return True - if old_configs[k] != v: - return True - return False - else: - old_configs = self.old_instance_json['metadata'][key] - return self.config[key] != old_configs - - def _needs_to_apply_instance_configs(self): - return ( - self._needs_to_change_instance_config('architecture') or - self._needs_to_change_instance_config('config') or - self._needs_to_change_instance_config('ephemeral') or - self._needs_to_change_instance_config('devices') or - self._needs_to_change_instance_config('profiles') - ) - - def _apply_instance_configs(self): - old_metadata = self.old_instance_json['metadata'] - body_json = { - 'architecture': 
old_metadata['architecture'],
-            'config': old_metadata['config'],
-            'devices': old_metadata['devices'],
-            'profiles': old_metadata['profiles']
-        }
-
-        if self._needs_to_change_instance_config('architecture'):
-            body_json['architecture'] = self.config['architecture']
-        if self._needs_to_change_instance_config('config'):
-            for k, v in self.config['config'].items():
-                body_json['config'][k] = v
-        if self._needs_to_change_instance_config('ephemeral'):
-            body_json['ephemeral'] = self.config['ephemeral']
-        if self._needs_to_change_instance_config('devices'):
-            body_json['devices'] = self.config['devices']
-        if self._needs_to_change_instance_config('profiles'):
-            body_json['profiles'] = self.config['profiles']
-
-        self.client.do('PUT', '{0}/{1}'.format(self.api_endpoint, self.name), body_json=body_json)
-        self.actions.append('apply_instance_configs')
-
-    def run(self):
-        """Run the main method."""
-
-        try:
-            if self.trust_password is not None:
-                self.client.authenticate(self.trust_password)
-            self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')
-
-            self.old_instance_json = self._get_instance_json()
-            self.old_state = self._instance_json_to_module_state(self.old_instance_json)
-            action = getattr(self, LXD_ANSIBLE_STATES[self.state])
-            action()
-
-            state_changed = len(self.actions) > 0
-            result_json = {
-                'log_verbosity': self.module._verbosity,
-                'changed': state_changed,
-                'old_state': self.old_state,
-                'actions': self.actions
-            }
-            if self.client.debug:
-                result_json['logs'] = self.client.logs
-            if self.addresses is not None:
-                result_json['addresses'] = self.addresses
-            self.module.exit_json(**result_json)
-        except LXDClientException as e:
-            state_changed = len(self.actions) > 0
-            fail_params = {
-                'msg': e.msg,
-                'changed': state_changed,
-                'actions': self.actions
-            }
-            if self.client.debug:
-                fail_params['logs'] = e.kwargs['logs']
-            self.module.fail_json(**fail_params)
-
-
-def main():
-    """Ansible Main module."""
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(
-                type='str',
-                required=True
-            ),
-            architecture=dict(
-                type='str',
-            ),
-            config=dict(
-                type='dict',
-            ),
-            ignore_volatile_options=dict(
-                type='bool',
-            ),
-            devices=dict(
-                type='dict',
-            ),
-            ephemeral=dict(
-                type='bool',
-            ),
-            profiles=dict(
-                type='list',
-                elements='str',
-            ),
-            source=dict(
-                type='dict',
-            ),
-            state=dict(
-                choices=list(LXD_ANSIBLE_STATES.keys()),
-                default='started'
-            ),
-            target=dict(
-                type='str',
-            ),
-            timeout=dict(
-                type='int',
-                default=30
-            ),
-            type=dict(
-                type='str',
-                default='container',
-                choices=['container', 'virtual-machine'],
-            ),
-            wait_for_container=dict(
-                type='bool',
-                default=False
-            ),
-            wait_for_ipv4_addresses=dict(
-                type='bool',
-                default=False
-            ),
-            force_stop=dict(
-                type='bool',
-                default=False
-            ),
-            url=dict(
-                type='str',
-                default=ANSIBLE_LXD_DEFAULT_URL
-            ),
-            snap_url=dict(
-                type='str',
-                default='unix:/var/snap/lxd/common/lxd/unix.socket'
-            ),
-            client_key=dict(
-                type='path',
-                aliases=['key_file']
-            ),
-            client_cert=dict(
-                type='path',
-                aliases=['cert_file']
-            ),
-            trust_password=dict(type='str', no_log=True)
-        ),
-        supports_check_mode=False,
-    )
-
-    if module.params['ignore_volatile_options'] is None:
-        module.params['ignore_volatile_options'] = True
-        module.deprecate(
-            'If the keyword "volatile" is used in a playbook in the config '
-            'section, a "changed" message will appear with every run, even without a change '
-            'to the playbook. This will change in the future. '
-            'Please test your scripts with "ignore_volatile_options: false". '
-            'To keep the old behavior, set that option explicitly to "true".',
-            version='6.0.0', collection_name='community.general')
-
-    lxd_manage = LXDContainerManagement(module=module)
-    lxd_manage.run()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py b/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py
deleted file mode 100644
index 3094898f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py
+++ /dev/null
@@ -1,518 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Hiroaki Nakamura
-# Copyright: (c) 2020, Frank Dornheim
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: lxd_profile
-short_description: Manage LXD profiles
-description:
-  - Management of LXD profiles
-author: "Hiroaki Nakamura (@hnakamur)"
-options:
-    name:
-        description:
-          - Name of a profile.
-        required: true
-        type: str
-    description:
-        description:
-          - Description of the profile.
-        type: str
-    config:
-        description:
-          - 'The config for the container (e.g. {"limits.memory": "4GB"}).
-            See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
-          - If the profile already exists and its "config" value in metadata
-            obtained from
-            GET /1.0/profiles/
-            U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
-            is different, then this module tries to apply the configurations.
-          - Not all config values can be applied to an existing profile;
-            you may need to delete and recreate the profile.
-        required: false
-        type: dict
-    devices:
-        description:
-          - 'The devices for the profile
-            (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}}).
-            See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
-        required: false
-        type: dict
-    new_name:
-        description:
-          - The new name of the profile.
-          - If this parameter is specified, the profile will be renamed to this name.
-            See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
-        required: false
-        type: str
-    merge_profile:
-        description:
-          - Merge the configuration of the present profile with the new desired configuration,
-            instead of replacing it.
-        required: false
-        default: false
-        type: bool
-        version_added: 2.1.0
-    state:
-        choices:
-          - present
-          - absent
-        description:
-          - Define the state of a profile.
-        required: false
-        default: present
-        type: str
-    url:
-        description:
-          - The unix domain socket path or the https URL for the LXD server.
-        required: false
-        default: unix:/var/lib/lxd/unix.socket
-        type: str
-    snap_url:
-        description:
-          - The unix domain socket path when LXD is installed by snap package manager.
-        required: false
-        default: unix:/var/snap/lxd/common/lxd/unix.socket
-        type: str
-    client_key:
-        description:
-          - The client certificate key file path.
-          - If not specified, it defaults to C($HOME/.config/lxc/client.key).
-        required: false
-        aliases: [ key_file ]
-        type: path
-    client_cert:
-        description:
-          - The client certificate file path.
-          - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
-        required: false
-        aliases: [ cert_file ]
-        type: path
-    trust_password:
-        description:
-          - The client trust password.
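Editor's note: the option documentation above describes a compare-then-apply flow, where the module only pushes a PUT when the desired profile config differs from what GET /1.0/profiles/<name> returns. A hedged, self-contained sketch of that decision (the function name is illustrative only):

# Only issue a PUT when a desired key differs from the existing metadata.
def needs_update(desired, existing):
    return any(existing.get(k) != v for k, v in desired.items())

existing = {'limits.memory': '2GB'}
desired = {'limits.memory': '4GB'}
print(needs_update(desired, existing))  # True -> the module would apply the config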
-          - You need to set this password on the LXD server before
-            running this module using the following command.
-            lxc config set core.trust_password
-            See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
-          - If trust_password is set, this module sends a request for
-            authentication before sending any requests.
-        required: false
-        type: str
-notes:
-  - Profiles must have a unique name. If you attempt to create a profile
-    with a name that already exists in the user's namespace, the module will
-    simply return as "unchanged".
-'''
-
-EXAMPLES = '''
-# An example for creating a profile
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Create a profile
-      community.general.lxd_profile:
-        name: macvlan
-        state: present
-        config: {}
-        description: my macvlan profile
-        devices:
-          eth0:
-            nictype: macvlan
-            parent: br0
-            type: nic
-
-# An example for creating a profile via an https connection
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Create macvlan profile
-      community.general.lxd_profile:
-        url: https://127.0.0.1:8443
-        # These client_cert and client_key values are equal to the default values.
-        #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
-        #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
-        trust_password: mypassword
-        name: macvlan
-        state: present
-        config: {}
-        description: my macvlan profile
-        devices:
-          eth0:
-            nictype: macvlan
-            parent: br0
-            type: nic
-
-# An example for modifying/merging a profile
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Merge a profile
-      community.general.lxd_profile:
-        merge_profile: true
-        name: macvlan
-        state: present
-        config: {}
-        description: my macvlan profile
-        devices:
-          eth0:
-            nictype: macvlan
-            parent: br0
-            type: nic
-
-# An example for deleting a profile
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Delete a profile
-      community.general.lxd_profile:
-        name: macvlan
-        state: absent
-
-# An example for renaming a profile
-- hosts: localhost
-  connection: local
-  tasks:
-    - name: Rename a profile
-      community.general.lxd_profile:
-        name: macvlan
-        new_name: macvlan2
-        state: present
-'''
-
-RETURN = '''
-old_state:
-  description: The old state of the profile
-  returned: success
-  type: str
-  sample: "absent"
-logs:
-  description: The logs of requests and responses.
-  returned: when ansible-playbook is invoked with -vvvv.
-  type: list
-  sample: "(too long to be placed here)"
-actions:
-  description: List of actions performed for the profile.
-  returned: success
-  type: list
-  sample: '["create"]'
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
-
-# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
-ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
-
-# PROFILES_STATES is a list of supported states.
-PROFILES_STATES = [
-    'present', 'absent'
-]
-
-# CONFIG_PARAMS is a list of config attribute names.
-CONFIG_PARAMS = [
-    'config', 'description', 'devices'
-]
-
-
-class LXDProfileManagement(object):
    def __init__(self, module):
-        """Management of LXD profiles via Ansible.
-
-        :param module: Processed Ansible Module.
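Editor's note: both removed modules fall back to $HOME/.config/lxc for the client certificate pair when client_key/client_cert are not given. A short sketch of that fallback, using the paths named in the option docs (the helper name is illustrative):

import os

# Return the user-supplied path, or the documented default under $HOME.
def default_cred(param, filename):
    if param is not None:
        return param
    return os.path.join(os.environ['HOME'], '.config', 'lxc', filename)

print(default_cred(None, 'client.key'))  # e.g. /home/user/.config/lxc/client.key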
-        :type module: ``object``
-        """
-        self.module = module
-        self.name = self.module.params['name']
-        self._build_config()
-        self.state = self.module.params['state']
-        self.new_name = self.module.params.get('new_name', None)
-
-        self.key_file = self.module.params.get('client_key')
-        if self.key_file is None:
-            self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
-        self.cert_file = self.module.params.get('client_cert')
-        if self.cert_file is None:
-            self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
-        self.debug = self.module._verbosity >= 4
-
-        try:
-            if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
-                self.url = self.module.params['url']
-            elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
-                self.url = self.module.params['snap_url']
-            else:
-                self.url = self.module.params['url']
-        except Exception as e:
-            self.module.fail_json(msg=e.msg)
-
-        try:
-            self.client = LXDClient(
-                self.url, key_file=self.key_file, cert_file=self.cert_file,
-                debug=self.debug
-            )
-        except LXDClientException as e:
-            self.module.fail_json(msg=e.msg)
-        self.trust_password = self.module.params.get('trust_password', None)
-        self.actions = []
-
-    def _build_config(self):
-        self.config = {}
-        for attr in CONFIG_PARAMS:
-            param_val = self.module.params.get(attr, None)
-            if param_val is not None:
-                self.config[attr] = param_val
-
-    def _get_profile_json(self):
-        return self.client.do(
-            'GET', '/1.0/profiles/{0}'.format(self.name),
-            ok_error_codes=[404]
-        )
-
-    @staticmethod
-    def _profile_json_to_module_state(resp_json):
-        if resp_json['type'] == 'error':
-            return 'absent'
-        return 'present'
-
-    def _update_profile(self):
-        if self.state == 'present':
-            if self.old_state == 'absent':
-                if self.new_name is None:
-                    self._create_profile()
-                else:
-                    self.module.fail_json(
-                        msg='new_name must not be set when the profile does not exist and the state is present',
-                        changed=False)
-            else:
-                if self.new_name is not None and self.new_name != self.name:
-                    self._rename_profile()
-                if self._needs_to_apply_profile_configs():
-                    self._apply_profile_configs()
-        elif self.state == 'absent':
-            if self.old_state == 'present':
-                if self.new_name is None:
-                    self._delete_profile()
-                else:
-                    self.module.fail_json(
-                        msg='new_name must not be set when the profile exists and the specified state is absent',
-                        changed=False)
-
-    def _create_profile(self):
-        config = self.config.copy()
-        config['name'] = self.name
-        self.client.do('POST', '/1.0/profiles', config)
-        self.actions.append('create')
-
-    def _rename_profile(self):
-        config = {'name': self.new_name}
-        self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
-        self.actions.append('rename')
-        self.name = self.new_name
-
-    def _needs_to_change_profile_config(self, key):
-        if key not in self.config:
-            return False
-        old_configs = self.old_profile_json['metadata'].get(key, None)
-        return self.config[key] != old_configs
-
-    def _needs_to_apply_profile_configs(self):
-        return (
-            self._needs_to_change_profile_config('config') or
-            self._needs_to_change_profile_config('description') or
-            self._needs_to_change_profile_config('devices')
-        )
-
-    def _merge_dicts(self, source, destination):
-        """Merge two dictionaries recursively.
-
-        Values from the source dict are merged into the destination dict;
-        nested dictionaries are merged key by key rather than replaced.
-
-        Args:
-            dict(source): source dict
-            dict(destination): destination dict
-        Kwargs:
-            None
-        Raises:
-            None
-        Returns:
-            dict(destination): merged dict"""
-        for key, value in source.items():
-            if isinstance(value, dict):
-                # get node or create one
-                node = destination.setdefault(key, {})
-                self._merge_dicts(value, node)
-            else:
-                destination[key] = value
-        return destination
-
-    def _merge_config(self, config):
-        """Merge profile configuration.
-
-        Merge the configuration of the present profile with the new desired
-        config items.
-
-        Args:
-            dict(config): Dict with the old config in 'metadata' and new config in 'config'
-        Kwargs:
-            None
-        Raises:
-            None
-        Returns:
-            dict(config): new config"""
-        # merge or copy the sections from the existing profile to 'config'
-        for item in ['config', 'description', 'devices', 'name', 'used_by']:
-            if item in config:
-                config[item] = self._merge_dicts(config['metadata'][item], config[item])
-            else:
-                config[item] = config['metadata'][item]
-        # merge or copy the sections from the ansible-task to 'config'
-        return self._merge_dicts(self.config, config)
-
-    def _generate_new_config(self, config):
-        """Rebuild the profile.
-
-        Rebuild the profile from the configuration provided in the play.
-        Existing configurations are discarded.
-
-        This is the default behavior.
-
-        Args:
-            dict(config): Dict with the old config in 'metadata' and new config in 'config'
-        Kwargs:
-            None
-        Raises:
-            None
-        Returns:
-            dict(config): new config"""
-        for k, v in self.config.items():
-            config[k] = v
-        return config
-
-    def _apply_profile_configs(self):
-        """Selection of the procedure: rebuild or merge.
-
-        The standard behavior is that all information not contained
-        in the play is discarded.
-
-        If "merge_profile" is provided in the play and set to "True", the
-        existing configuration of the profile is merged with the newly
-        defined one.
-
-        Args:
-            None
-        Kwargs:
-            None
-        Raises:
-            None
-        Returns:
-            None"""
-        config = self.old_profile_json.copy()
-        if self.module.params['merge_profile']:
-            config = self._merge_config(config)
-        else:
-            config = self._generate_new_config(config)
-
-        # upload config to lxd
-        self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
-        self.actions.append('apply_profile_configs')
-
-    def _delete_profile(self):
-        self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
-        self.actions.append('delete')
-
-    def run(self):
-        """Run the main method."""
-
-        try:
-            if self.trust_password is not None:
-                self.client.authenticate(self.trust_password)
-
-            self.old_profile_json = self._get_profile_json()
-            self.old_state = self._profile_json_to_module_state(self.old_profile_json)
-            self._update_profile()
-
-            state_changed = len(self.actions) > 0
-            result_json = {
-                'changed': state_changed,
-                'old_state': self.old_state,
-                'actions': self.actions
-            }
-            if self.client.debug:
-                result_json['logs'] = self.client.logs
-            self.module.exit_json(**result_json)
-        except LXDClientException as e:
-            state_changed = len(self.actions) > 0
-            fail_params = {
-                'msg': e.msg,
-                'changed': state_changed,
-                'actions': self.actions
-            }
-            if self.client.debug:
-                fail_params['logs'] = e.kwargs['logs']
-            self.module.fail_json(**fail_params)
-
-
-def main():
-    """Ansible Main module."""
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(
-                type='str',
-                required=True
-            ),
-            new_name=dict(
-                type='str',
-            ),
-            config=dict(
-                type='dict',
-            ),
-            description=dict(
-                type='str',
-            ),
-            devices=dict(
-                type='dict',
-            ),
-            merge_profile=dict(
-                type='bool',
-                default=False
-            ),
-            state=dict(
-                choices=PROFILES_STATES,
-                default='present'
-            ),
-            url=dict(
-                type='str',
-                default=ANSIBLE_LXD_DEFAULT_URL
-            ),
-            snap_url=dict(
-                type='str',
-                default='unix:/var/snap/lxd/common/lxd/unix.socket'
-            ),
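Editor's note: the recursive merge behind merge_profile=true is easiest to see standalone. This is the same algorithm as _merge_dicts above, lifted out of the class so it runs on its own:

# Nested keys from `source` are merged into `destination` in place.
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            merge_dicts(value, destination.setdefault(key, {}))
        else:
            destination[key] = value
    return destination

old = {'devices': {'eth0': {'type': 'nic'}}}
new = {'devices': {'eth0': {'parent': 'br0'}}}
print(merge_dicts(new, old))
# {'devices': {'eth0': {'type': 'nic', 'parent': 'br0'}}}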
client_key=dict( - type='path', - aliases=['key_file'] - ), - client_cert=dict( - type='path', - aliases=['cert_file'] - ), - trust_password=dict(type='str', no_log=True) - ), - supports_check_mode=False, - ) - - lxd_manage = LXDProfileManagement(module=module) - lxd_manage.run() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py deleted file mode 100644 index 6eefe133..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_dns_reload -author: "Simon Weald (@glitchcrab)" -short_description: Request reload of Memset's DNS infrastructure, -notes: - - DNS reload requests are a best-effort service provided by Memset; these generally - happen every 15 minutes by default, however you can request an immediate reload if - later tasks rely on the records being created. An API key generated via the - Memset customer control panel is required with the following minimum scope - - I(dns.reload). If you wish to poll the job status to wait until the reload has - completed, then I(job.status) is also required. -description: - - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes. -options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - poll: - default: false - type: bool - description: - - Boolean value, if set will poll the reload job's status and return - when the job has completed (unless the 30 second timeout is reached first). - If the timeout is reached then the task will not be marked as failed, but - stderr will indicate that the polling failed. -''' - -EXAMPLES = ''' -- name: Submit DNS reload and poll - community.general.memset_dns_reload: - api_key: 5eb86c9196ab03919abcf03857163741 - poll: True - delegate_to: localhost -''' - -RETURN = ''' ---- -memset_api: - description: Raw response from the Memset API. - returned: always - type: complex - contains: - error: - description: Whether the job ended in error state. - returned: always - type: bool - sample: true - finished: - description: Whether the job completed before the result was returned. - returned: always - type: bool - sample: true - id: - description: Job ID. - returned: always - type: str - sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8" - status: - description: Job status. - returned: always - type: str - sample: "DONE" - type: - description: Job type. - returned: always - type: str - sample: "dns" -''' - -from time import sleep - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def poll_reload_status(api_key=None, job_id=None, payload=None): - ''' - We poll the `job.status` endpoint every 5 seconds up to a - maximum of 6 times. This is a relatively arbitrary choice of - timeout, however requests rarely take longer than 15 seconds - to complete. 
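Editor's note: the docstring above describes bounded polling, checking job.status every 5 seconds at most 6 times. A minimal sketch of that loop; fetch_status stands in for the memset_api_call round trip and is not part of the removed code:

import time

def wait_for_job(fetch_status, attempts=6, interval=5):
    for _ in range(attempts):
        status = fetch_status()
        if status.get('finished'):
            return status
        time.sleep(interval)
    return None  # caller reports a polling timeout, not a task failure

print(wait_for_job(lambda: {'finished': True, 'status': 'DONE'}))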
- ''' - memset_api, stderr, msg = None, None, None - payload['id'] = job_id - - api_method = 'job.status' - _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload) - - while not response.json()['finished']: - counter = 0 - while counter < 6: - sleep(5) - _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload) - counter += 1 - if response.json()['error']: - # the reload job was submitted but polling failed. Don't return this as an overall task failure. - stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status." - else: - memset_api = response.json() - msg = None - - return(memset_api, msg, stderr) - - -def reload_dns(args=None): - ''' - DNS reloads are a single API call and therefore there's not much - which can go wrong outside of auth errors. - ''' - retvals, payload = dict(), dict() - has_changed, has_failed = False, False - memset_api, msg, stderr = None, None, None - - api_method = 'dns.reload' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - if has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. - retvals['failed'] = has_failed - retvals['memset_api'] = response.json() - retvals['msg'] = msg - return(retvals) - - # set changed to true if the reload request was accepted. - has_changed = True - memset_api = msg - # empty msg var as we don't want to return the API's json response twice. - msg = None - - if args['poll']: - # hand off to the poll function. - job_id = response.json()['id'] - memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload) - - # assemble return variables. - retvals['failed'] = has_failed - retvals['changed'] = has_changed - for val in ['msg', 'stderr', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - poll=dict(required=False, default=False, type='bool') - ), - supports_check_mode=False - ) - - # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg - - retvals = reload_dns(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py deleted file mode 100644 index e880b460..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_memstore_info -author: "Simon Weald (@glitchcrab)" -short_description: Retrieve Memstore product usage information. -notes: - - An API key generated via the Memset customer control panel is needed with the - following minimum scope - I(memstore.usage). 
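Editor's note: the memset modules in this diff assemble their return values with eval() over local variable names (retvals[val] = eval(val)). An equivalent, eval-free sketch that builds the dict explicitly and skips values still set to None; the function name is illustrative:

def assemble_retvals(msg=None, stderr=None, memset_api=None, **flags):
    retvals = dict(flags)
    for key, value in {'msg': msg, 'stderr': stderr, 'memset_api': memset_api}.items():
        if value is not None:
            retvals[key] = value
    return retvals

print(assemble_retvals(failed=False, changed=True, memset_api={'id': 'abc'}))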
-description: - - Retrieve Memstore product usage information. - - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change. -options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - name: - required: true - type: str - description: - - The Memstore product name (i.e. C(mstestyaa1)). -''' - -EXAMPLES = ''' -- name: Get usage for mstestyaa1 - community.general.memset_memstore_info: - name: mstestyaa1 - api_key: 5eb86c9896ab03919abcf03857163741 - delegate_to: localhost -''' - -RETURN = ''' ---- -memset_api: - description: Info from the Memset API - returned: always - type: complex - contains: - cdn_bandwidth: - description: Dictionary of CDN bandwidth facts - returned: always - type: complex - contains: - bytes_out: - description: Outbound CDN bandwidth for the last 24 hours in bytes - returned: always - type: int - sample: 1000 - requests: - description: Number of requests in the last 24 hours - returned: always - type: int - sample: 10 - bytes_in: - description: Inbound CDN bandwidth for the last 24 hours in bytes - returned: always - type: int - sample: 1000 - containers: - description: Number of containers - returned: always - type: int - sample: 10 - bytes: - description: Space used in bytes - returned: always - type: int - sample: 3860997965 - objs: - description: Number of objects - returned: always - type: int - sample: 1000 - bandwidth: - description: Dictionary of CDN bandwidth facts - returned: always - type: complex - contains: - bytes_out: - description: Outbound bandwidth for the last 24 hours in bytes - returned: always - type: int - sample: 1000 - requests: - description: Number of requests in the last 24 hours - returned: always - type: int - sample: 10 - bytes_in: - description: Inbound bandwidth for the last 24 hours in bytes - returned: always - type: int - sample: 1000 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def get_facts(args=None): - ''' - Performs a simple API call and returns a JSON blob. - ''' - retvals, payload = dict(), dict() - has_changed, has_failed = False, False - msg, stderr, memset_api = None, None, None - - payload['name'] = args['name'] - - api_method = 'memstore.usage' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - - if has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. - retvals['failed'] = has_failed - retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) - - # we don't want to return the same thing twice - msg = None - memset_api = response.json() - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, type='str') - ), - supports_check_mode=True, - ) - - # populate the dict with the user-provided vars. 
- args = dict() - for key, arg in module.params.items(): - args[key] = arg - - retvals = get_facts(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py deleted file mode 100644 index 853e2c88..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_server_info -author: "Simon Weald (@glitchcrab)" -short_description: Retrieve server information. -notes: - - An API key generated via the Memset customer control panel is needed with the - following minimum scope - I(server.info). -description: - - Retrieve server information. - - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change. -options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - name: - required: true - type: str - description: - - The server product name (i.e. C(testyaa1)). -''' - -EXAMPLES = ''' -- name: Get details for testyaa1 - community.general.memset_server_info: - name: testyaa1 - api_key: 5eb86c9896ab03919abcf03857163741 - delegate_to: localhost -''' - -RETURN = ''' ---- -memset_api: - description: Info from the Memset API - returned: always - type: complex - contains: - backups: - description: Whether this server has a backup service. - returned: always - type: bool - sample: true - control_panel: - description: Whether the server has a control panel (i.e. cPanel). - returned: always - type: str - sample: 'cpanel' - data_zone: - description: The data zone the server is in. - returned: always - type: str - sample: 'Memset Public Cloud' - expiry_date: - description: Current expiry date of the server. - returned: always - type: str - sample: '2018-08-10' - firewall_rule_group: - description: Details about the firewall group this server is in. - returned: always - type: dict - sample: { - "default_outbound_policy": "RETURN", - "name": "testyaa-fw1", - "nickname": "testyaa cPanel rules", - "notes": "", - "public": false, - "rules": { - "51d7db54d39c3544ef7c48baa0b9944f": { - "action": "ACCEPT", - "comment": "", - "dest_ip6s": "any", - "dest_ips": "any", - "dest_ports": "any", - "direction": "Inbound", - "ip_version": "any", - "ordering": 2, - "protocols": "icmp", - "rule_group_name": "testyaa-fw1", - "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", - "source_ip6s": "any", - "source_ips": "any", - "source_ports": "any" - } - } - } - firewall_type: - description: The type of firewall the server has (i.e. self-managed, managed). - returned: always - type: str - sample: 'managed' - host_name: - description: The server's hostname. - returned: always - type: str - sample: 'testyaa1.miniserver.com' - ignore_monitoring_off: - description: When true, Memset won't remind the customer that monitoring is disabled. - returned: always - type: bool - sample: true - ips: - description: List of dictionaries of all IP addresses assigned to the server. 
- returned: always - type: list - sample: [ - { - "address": "1.2.3.4", - "bytes_in_today": 1000.0, - "bytes_in_yesterday": 2000.0, - "bytes_out_today": 1000.0, - "bytes_out_yesterday": 2000.0 - } - ] - monitor: - description: Whether the server has monitoring enabled. - returned: always - type: bool - sample: true - monitoring_level: - description: The server's monitoring level (i.e. basic). - returned: always - type: str - sample: 'basic' - name: - description: Server name (same as the service name). - returned: always - type: str - sample: 'testyaa1' - network_zones: - description: The network zone(s) the server is in. - returned: always - type: list - sample: [ 'reading' ] - nickname: - description: Customer-set nickname for the server. - returned: always - type: str - sample: 'database server' - no_auto_reboot: - description: Whether or not to reboot the server if monitoring detects it down. - returned: always - type: bool - sample: true - no_nrpe: - description: Whether Memset should use NRPE to monitor this server. - returned: always - type: bool - sample: true - os: - description: The server's Operating System. - returned: always - type: str - sample: 'debian_stretch_64' - penetration_patrol: - description: Intrusion detection support level for this server. - returned: always - type: str - sample: 'managed' - penetration_patrol_alert_level: - description: The alert level at which notifications are sent. - returned: always - type: int - sample: 10 - primary_ip: - description: Server's primary IP. - returned: always - type: str - sample: '1.2.3.4' - renewal_price_amount: - description: Renewal cost for the server. - returned: always - type: str - sample: '30.00' - renewal_price_currency: - description: Currency for renewal payments. - returned: always - type: str - sample: 'GBP' - renewal_price_vat: - description: VAT rate for renewal payments - returned: always - type: str - sample: '20' - start_date: - description: Server's start date. - returned: always - type: str - sample: '2013-04-10' - status: - description: Current status of the server (i.e. live, onhold). - returned: always - type: str - sample: 'LIVE' - support_level: - description: Support level included with the server. - returned: always - type: str - sample: 'managed' - type: - description: What this server is (i.e. dedicated) - returned: always - type: str - sample: 'miniserver' - vlans: - description: Dictionary of tagged and untagged VLANs this server is in. - returned: always - type: dict - sample: { - tagged: [], - untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ] - } - vulnscan: - description: Vulnerability scanning level. - returned: always - type: str - sample: 'basic' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def get_facts(args=None): - ''' - Performs a simple API call and returns a JSON blob. - ''' - retvals, payload = dict(), dict() - has_changed, has_failed = False, False - msg, stderr, memset_api = None, None, None - - payload['name'] = args['name'] - - api_method = 'server.info' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - - if has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. - retvals['failed'] = has_failed - retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . 
format(response.status_code) - return(retvals) - - # we don't want to return the same thing twice - msg = None - memset_api = response.json() - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, type='str') - ), - supports_check_mode=True, - ) - - # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg - - retvals = get_facts(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py deleted file mode 100644 index 9ef798bd..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_zone -author: "Simon Weald (@glitchcrab)" -short_description: Creates and deletes Memset DNS zones. -notes: - - Zones can be thought of as a logical group of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list). -description: - - Manage DNS zones in a Memset account. -options: - state: - required: true - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - name: - required: true - description: - - The zone nickname; usually the same as the main domain. Ensure this - value has at most 250 characters. - type: str - aliases: [ nickname ] - ttl: - description: - - The default TTL for all records created in the zone. This must be a - valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). - type: int - choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] - force: - required: false - default: false - type: bool - description: - - Forces deletion of a zone and all zone domains/zone records it contains. 
-''' - -EXAMPLES = ''' -# Create the zone 'test' -- name: Create zone - community.general.memset_zone: - name: test - state: present - api_key: 5eb86c9196ab03919abcf03857163741 - ttl: 300 - delegate_to: localhost - -# Force zone deletion -- name: Force delete zone - community.general.memset_zone: - name: test - state: absent - api_key: 5eb86c9196ab03919abcf03857163741 - force: true - delegate_to: localhost -''' - -RETURN = ''' -memset_api: - description: Zone info from the Memset API - returned: when state == present - type: complex - contains: - domains: - description: List of domains in this zone - returned: always - type: list - sample: [] - id: - description: Zone id - returned: always - type: str - sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" - nickname: - description: Zone name - returned: always - type: str - sample: "example.com" - records: - description: List of DNS records for domains in this zone - returned: always - type: list - sample: [] - ttl: - description: Default TTL for domains in this zone - returned: always - type: int - sample: 300 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import check_zone -from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def api_validation(args=None): - ''' - Perform some validation which will be enforced by Memset's API (see: - https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) - ''' - # zone domain length must be less than 250 chars. - if len(args['name']) > 250: - stderr = 'Zone name must be less than 250 characters in length.' - module.fail_json(failed=True, msg=stderr, stderr=stderr) - - -def check(args=None): - ''' - Support for running with check mode. - ''' - retvals = dict() - - api_method = 'dns.zone_list' - has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - zone_exists, counter = check_zone(data=response, name=args['name']) - - # set changed to true if the operation would cause a change. - has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present')) - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - - return(retvals) - - -def create_zone(args=None, zone_exists=None, payload=None): - ''' - At this point we already know whether the zone exists, so we - just need to make the API reflect the desired state. - ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - if not zone_exists: - payload['ttl'] = args['ttl'] - payload['nickname'] = args['name'] - api_method = 'dns.zone_create' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - else: - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - for zone in response.json(): - if zone['nickname'] == args['name']: - break - if zone['ttl'] != args['ttl']: - # update the zone if the desired TTL is different. - payload['id'] = zone['id'] - payload['ttl'] = args['ttl'] - api_method = 'dns.zone_update' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - - # populate return var with zone info. 
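Editor's note: the check() helper above predicts a change exactly when the zone's existence disagrees with the desired state. The rule compresses to a single inequality, sketched here with an illustrative function name:

def predicts_change(zone_exists, state):
    return zone_exists != (state == 'present')

print(predicts_change(True, 'absent'))    # True  -> would delete
print(predicts_change(False, 'present'))  # True  -> would create
print(predicts_change(True, 'present'))   # False -> no-op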
- api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) - - if zone_exists: - payload = dict() - payload['id'] = zone_id - api_method = 'dns.zone_info' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - memset_api = response.json() - - return(has_failed, has_changed, memset_api, msg) - - -def delete_zone(args=None, zone_exists=None, payload=None): - ''' - Deletion requires extra sanity checking as the zone cannot be - deleted if it contains domains or records. Setting force=true - will override this behaviour. - ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - if zone_exists: - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - counter = 0 - for zone in response.json(): - if zone['nickname'] == args['name']: - counter += 1 - if counter == 1: - for zone in response.json(): - if zone['nickname'] == args['name']: - zone_id = zone['id'] - domain_count = len(zone['domains']) - record_count = len(zone['records']) - if (domain_count > 0 or record_count > 0) and args['force'] is False: - # we need to fail out if force was not explicitly set. - stderr = 'Zone contains domains or records and force was not used.' - has_failed = True - has_changed = False - module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1) - api_method = 'dns.zone_delete' - payload['id'] = zone_id - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice. - memset_api = msg - msg = None - else: - # zone names are not unique, so we cannot safely delete the requested - # zone at this time. - has_failed = True - has_changed = False - msg = 'Unable to delete zone as multiple zones with the same name exist.' - else: - has_failed, has_changed = False, False - - return(has_failed, has_changed, memset_api, msg) - - -def create_or_delete(args=None): - ''' - We need to perform some initial sanity checking and also look - up required info before handing it off to create or delete. - ''' - retvals, payload = dict(), dict() - has_failed, has_changed = False, False - msg, memset_api, stderr = None, None, None - - # get the zones and check if the relevant zone exists. - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - if _has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. 
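Editor's note: delete_zone() above refuses to delete a zone that still contains domains or records unless force=true was supplied. A compact sketch of that guard (the function name is illustrative):

def deletable(zone, force):
    populated = len(zone.get('domains', [])) > 0 or len(zone.get('records', [])) > 0
    return force or not populated

print(deletable({'domains': ['example.com'], 'records': []}, force=False))  # False
print(deletable({'domains': ['example.com'], 'records': []}, force=True))   # True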
- retvals['failed'] = _has_failed - retvals['msg'] = _msg - - return(retvals) - - zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) - - if args['state'] == 'present': - has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload) - - elif args['state'] == 'absent': - has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload) - - retvals['failed'] = has_failed - retvals['changed'] = has_changed - for val in ['msg', 'stderr', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['present', 'absent'], type='str'), - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, aliases=['nickname'], type='str'), - ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - force=dict(required=False, default=False, type='bool') - ), - supports_check_mode=True - ) - - # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg - args['check_mode'] = module.check_mode - - # validate some API-specific limitations. - api_validation(args=args) - - if module.check_mode: - retvals = check(args) - else: - retvals = create_or_delete(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py deleted file mode 100644 index 4aa0eada..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_zone_domain -author: "Simon Weald (@glitchcrab)" -short_description: Create and delete domains in Memset DNS zones. -notes: - - Zone domains can be thought of as a collection of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list). - - Currently this module can only create one domain at a time. Multiple domains should - be created using C(with_items). -description: - - Manage DNS zone domains in a Memset account. -options: - state: - default: present - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - domain: - required: true - description: - - The zone domain name. Ensure this value has at most 250 characters. - type: str - aliases: ['name'] - zone: - required: true - description: - - The zone to add the domain to (this must already exist). 
- type: str -''' - -EXAMPLES = ''' -# Create the zone domain 'test.com' -- name: Create zone domain - community.general.memset_zone_domain: - domain: test.com - zone: testzone - state: present - api_key: 5eb86c9196ab03919abcf03857163741 - delegate_to: localhost -''' - -RETURN = ''' -memset_api: - description: Domain info from the Memset API - returned: when changed or state == present - type: complex - contains: - domain: - description: Domain name - returned: always - type: str - sample: "example.com" - id: - description: Domain ID - returned: always - type: str - sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id -from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call - - -def api_validation(args=None): - ''' - Perform some validation which will be enforced by Memset's API (see: - https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create) - ''' - # zone domain length must be less than 250 chars - if len(args['domain']) > 250: - stderr = 'Zone domain must be less than 250 characters in length.' - module.fail_json(failed=True, msg=stderr) - - -def check(args=None): - ''' - Support for running with check mode. - ''' - retvals = dict() - has_changed = False - - api_method = 'dns.zone_domain_list' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - domain_exists = check_zone_domain(data=response, domain=args['domain']) - - # set changed to true if the operation would cause a change. - has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present')) - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - - return(retvals) - - -def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None): - ''' - At this point we already know whether the containing zone exists, - so we just need to create the domain (or exit if it already exists). - ''' - has_changed, has_failed = False, False - msg = None - - api_method = 'dns.zone_domain_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - for zone_domain in response.json(): - if zone_domain['domain'] == args['domain']: - # zone domain already exists, nothing to change. - has_changed = False - break - else: - # we need to create the domain - api_method = 'dns.zone_domain_create' - payload['domain'] = args['domain'] - payload['zone_id'] = zone_id - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - - return(has_failed, has_changed, msg) - - -def delete_zone_domain(args=None, payload=None): - ''' - Deletion is pretty simple, domains are always unique so we - we don't need to do any sanity checking to avoid deleting the - wrong thing. 
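Editor's note: create_zone_domain() above relies on Python's for/else construct, where the else branch runs only when the loop finishes without hitting break, i.e. when no existing zone domain matched and one has to be created. A standalone demonstration with sample data:

existing = [{'domain': 'example.org'}]
wanted = 'example.com'

for zone_domain in existing:
    if zone_domain['domain'] == wanted:
        print('already present, nothing to do')
        break
else:
    print('not found, would call dns.zone_domain_create')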
- ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - api_method = 'dns.zone_domain_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - domain_exists = check_zone_domain(data=response, domain=args['domain']) - - if domain_exists: - api_method = 'dns.zone_domain_delete' - payload['domain'] = args['domain'] - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - memset_api = response.json() - # unset msg as we don't want to return unnecessary info to the user. - msg = None - - return(has_failed, has_changed, memset_api, msg) - - -def create_or_delete_domain(args=None): - ''' - We need to perform some initial sanity checking and also look - up required info before handing it off to create or delete. - ''' - retvals, payload = dict(), dict() - has_changed, has_failed = False, False - msg, stderr, memset_api = None, None, None - - # get the zones and check if the relevant zone exists. - api_method = 'dns.zone_list' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - if has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. - retvals['failed'] = has_failed - retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) - - zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) - - if not zone_exists: - # the zone needs to be unique - this isn't a requirement of Memset's API but it - # makes sense in the context of this module. - has_failed = True - if counter == 0: - stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone']) - elif counter > 1: - stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone']) - - retvals['failed'] = has_failed - retvals['msg'] = stderr - return(retvals) - - if args['state'] == 'present': - has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload) - - if args['state'] == 'absent': - has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload) - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'stderr', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - api_key=dict(required=True, type='str', no_log=True), - domain=dict(required=True, aliases=['name'], type='str'), - zone=dict(required=True, type='str') - ), - supports_check_mode=True - ) - - # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg - args['check_mode'] = module.check_mode - - # validate some API-specific limitations. - api_validation(args=args) - - if module.check_mode: - retvals = check(args) - else: - retvals = create_or_delete_domain(args) - - # we would need to populate the return values with the API's response - # in several places so it's easier to do it at the end instead. 
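[Editor's note] The create_or_delete* helpers above populate retvals via retvals[val] = eval(val); note also that their "if val is not None" guard always passes, because val is the name string rather than the looked-up value. An eval-free sketch of the same idea (argument names assumed from the surrounding code):

    def collect_retvals(has_failed, has_changed, msg=None, stderr=None, memset_api=None):
        retvals = {'failed': has_failed, 'changed': has_changed}
        # only include optional fields that actually carry a value
        for name, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
            if value is not None:
                retvals[name] = value
        return retvals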
- if not retvals['failed']: - if args['state'] == 'present' and not module.check_mode: - payload = dict() - payload['domain'] = args['domain'] - api_method = 'dns.zone_domain_info' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - retvals['memset_api'] = response.json() - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py deleted file mode 100644 index 981d2ac4..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py +++ /dev/null @@ -1,380 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: memset_zone_record -author: "Simon Weald (@glitchcrab)" -short_description: Create and delete records in Memset DNS zones. -notes: - - Zones can be thought of as a logical group of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list). - - Currently this module can only create one DNS record at a time. Multiple records - should be created using C(with_items). -description: - - Manage DNS records in a Memset account. -options: - state: - default: present - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - address: - required: true - description: - - The address for this record (can be IP or text string depending on record type). - type: str - aliases: [ ip, data ] - priority: - description: - - C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive). - type: int - record: - required: false - description: - - The subdomain to create. - type: str - type: - required: true - description: - - The type of DNS record to create. - choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ] - type: str - relative: - type: bool - default: false - description: - - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS) - and C(SRV)record types. - ttl: - description: - - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a - valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create). - choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] - type: int - zone: - required: true - description: - - The name of the zone to which to add the record to. 
- type: str -''' - -EXAMPLES = ''' -# Create DNS record for www.domain.com -- name: Create DNS record - community.general.memset_zone_record: - api_key: dcf089a2896940da9ffefb307ef49ccd - state: present - zone: domain.com - type: A - record: www - address: 1.2.3.4 - ttl: 300 - relative: false - delegate_to: localhost - -# create an SPF record for domain.com -- name: Create SPF record for domain.com - community.general.memset_zone_record: - api_key: dcf089a2896940da9ffefb307ef49ccd - state: present - zone: domain.com - type: TXT - address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all" - delegate_to: localhost - -# create multiple DNS records -- name: Create multiple DNS records - community.general.memset_zone_record: - api_key: dcf089a2896940da9ffefb307ef49ccd - zone: "{{ item.zone }}" - type: "{{ item.type }}" - record: "{{ item.record }}" - address: "{{ item.address }}" - delegate_to: localhost - with_items: - - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' } - - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' } -''' - -RETURN = ''' -memset_api: - description: Record info from the Memset API. - returned: when state == present - type: complex - contains: - address: - description: Record content (may be an IP, string or blank depending on record type). - returned: always - type: str - sample: 1.1.1.1 - id: - description: Record ID. - returned: always - type: str - sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" - priority: - description: Priority for C(MX) and C(SRV) records. - returned: always - type: int - sample: 10 - record: - description: Name of record. - returned: always - type: str - sample: "www" - relative: - description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types. - returned: always - type: bool - sample: False - ttl: - description: Record TTL. - returned: always - type: int - sample: 10 - type: - description: Record type. - returned: always - type: str - sample: AAAA - zone_id: - description: Zone ID. - returned: always - type: str - sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id -from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call -from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id - - -def api_validation(args=None): - ''' - Perform some validation which will be enforced by Memset's API (see: - https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) - ''' - failed_validation = False - - # priority can only be integer 0 > 999 - if not 0 <= args['priority'] <= 999: - failed_validation = True - error = 'Priority must be in the range 0 > 999 (inclusive).' - # data value must be max 250 chars - if len(args['address']) > 250: - failed_validation = True - error = "Address must be less than 250 characters in length." - # record value must be max 250 chars - if args['record']: - if len(args['record']) > 63: - failed_validation = True - error = "Record must be less than 63 characters in length." - # relative isn't used for all record types - if args['relative']: - if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']: - failed_validation = True - error = "Relative is only valid for CNAME, MX, NS and SRV record types." 
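[Editor's note] Each failed check in api_validation() above overwrites error, so only the last violation reaches the user. A sketch that collects every violation instead (keys match the module's args dict; messages are illustrative):

    def validate_record_args(args):
        errors = []
        if not 0 <= args['priority'] <= 999:
            errors.append('Priority must be in the range 0-999 (inclusive).')
        if len(args['address']) > 250:
            errors.append('Address must be at most 250 characters long.')
        if args['record'] and len(args['record']) > 63:
            errors.append('Record must be at most 63 characters long.')
        if args['relative'] and args['type'] not in ('CNAME', 'MX', 'NS', 'SRV'):
            errors.append('Relative is only valid for CNAME, MX, NS and SRV record types.')
        return errors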
- # if any of the above failed then fail early - if failed_validation: - module.fail_json(failed=True, msg=error) - - -def create_zone_record(args=None, zone_id=None, records=None, payload=None): - ''' - Sanity checking has already occurred prior to this function being - called, so we can go ahead and either create or update the record. - As defaults are defined for all values in the argument_spec, this - may cause some changes to occur as the defaults are enforced (if - the user has only configured required variables). - ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - # assemble the new record. - new_record = dict() - new_record['zone_id'] = zone_id - for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']: - new_record[arg] = args[arg] - - # if we have any matches, update them. - if records: - for zone_record in records: - # record exists, add ID to payload. - new_record['id'] = zone_record['id'] - if zone_record == new_record: - # nothing to do; record is already correct so we populate - # the return var with the existing record's details. - memset_api = zone_record - return(has_changed, has_failed, memset_api, msg) - else: - # merge dicts ensuring we change any updated values - payload = zone_record.copy() - payload.update(new_record) - api_method = 'dns.zone_record_update' - if args['check_mode']: - has_changed = True - # return the new record to the user in the returned var. - memset_api = new_record - return(has_changed, has_failed, memset_api, msg) - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - memset_api = new_record - # empty msg as we don't want to return a boatload of json to the user. - msg = None - else: - # no record found, so we need to create it - api_method = 'dns.zone_record_create' - payload = new_record - if args['check_mode']: - has_changed = True - # populate the return var with the new record's details. - memset_api = new_record - return(has_changed, has_failed, memset_api, msg) - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - memset_api = new_record - # empty msg as we don't want to return a boatload of json to the user. - msg = None - - return(has_changed, has_failed, memset_api, msg) - - -def delete_zone_record(args=None, records=None, payload=None): - ''' - Matching records can be cleanly deleted without affecting other - resource types, so this is pretty simple to achieve. - ''' - has_changed, has_failed = False, False - msg, memset_api = None, None - - # if we have any matches, delete them. - if records: - for zone_record in records: - if args['check_mode']: - has_changed = True - return(has_changed, has_failed, memset_api, msg) - payload['id'] = zone_record['id'] - api_method = 'dns.zone_record_delete' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - if not has_failed: - has_changed = True - memset_api = zone_record - # empty msg as we don't want to return a boatload of json to the user. - msg = None - - return(has_changed, has_failed, memset_api, msg) - - -def create_or_delete(args=None): - ''' - We need to perform some initial sanity checking and also look - up required info before handing it off to create or delete functions. - Check mode is integrated into the create or delete functions. 
- ''' - has_failed, has_changed = False, False - msg, memset_api, stderr = None, None, None - retvals, payload = dict(), dict() - - # get the zones and check if the relevant zone exists. - api_method = 'dns.zone_list' - _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - if _has_failed: - # this is the first time the API is called; incorrect credentials will - # manifest themselves at this point so we need to ensure the user is - # informed of the reason. - retvals['failed'] = _has_failed - retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) - - zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) - - if not zone_exists: - has_failed = True - if counter == 0: - stderr = "DNS zone {0} does not exist." . format(args['zone']) - elif counter > 1: - stderr = "{0} matches multiple zones." . format(args['zone']) - retvals['failed'] = has_failed - retvals['msg'] = stderr - retvals['stderr'] = stderr - return(retvals) - - # get a list of all records ( as we can't limit records by zone) - api_method = 'dns.zone_record_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) - - # find any matching records - records = [record for record in response.json() if record['zone_id'] == zone_id - and record['record'] == args['record'] and record['type'] == args['type']] - - if args['state'] == 'present': - has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload) - - if args['state'] == 'absent': - has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload) - - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'stderr', 'memset_api']: - if val is not None: - retvals[val] = eval(val) - - return(retvals) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), - api_key=dict(required=True, type='str', no_log=True), - zone=dict(required=True, type='str'), - type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'), - address=dict(required=True, aliases=['ip', 'data'], type='str'), - record=dict(required=False, default='', type='str'), - ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - priority=dict(required=False, default=0, type='int'), - relative=dict(required=False, default=False, type='bool') - ), - supports_check_mode=True - ) - - # populate the dict with the user-provided vars. 
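[Editor's note] The copy loop just below (and its twins in the other memset modules) can be written as a single shallow copy. A sketch, assuming the usual AnsibleModule interface:

    def build_args(module):
        args = dict(module.params)       # shallow copy of the user-provided vars
        args['check_mode'] = module.check_mode
        return args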
- args = dict() - for key, arg in module.params.items(): - args[key] = arg - args['check_mode'] = module.check_mode - - # perform some Memset API-specific validation - api_validation(args=args) - - retvals = create_or_delete(args) - - if retvals['failed']: - module.fail_json(**retvals) - else: - module.exit_json(**retvals) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py b/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py deleted file mode 100644 index 1b44c50c..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: cloud_init_data_facts -short_description: Retrieve facts of cloud-init. -description: - - Gathers facts by reading the status.json and result.json of cloud-init. -author: René Moser (@resmo) -options: - filter: - description: - - Filter facts - type: str - choices: [ status, result ] -notes: - - See http://cloudinit.readthedocs.io/ for more information about cloud-init. -''' - -EXAMPLES = ''' -- name: Gather all facts of cloud init - community.general.cloud_init_data_facts: - register: result - -- ansible.builtin.debug: - var: result - -- name: Wait for cloud init to finish - community.general.cloud_init_data_facts: - filter: status - register: res - until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" - retries: 50 - delay: 5 -''' - -RETURN = ''' ---- -cloud_init_data_facts: - description: Facts of result and status. 
- returned: success - type: dict - sample: '{ - "status": { - "v1": { - "datasource": "DataSourceCloudStack", - "errors": [] - }, - "result": { - "v1": { - "datasource": "DataSourceCloudStack", - "init": { - "errors": [], - "finished": 1522066377.0185432, - "start": 1522066375.2648022 - }, - "init-local": { - "errors": [], - "finished": 1522066373.70919, - "start": 1522066373.4726632 - }, - "modules-config": { - "errors": [], - "finished": 1522066380.9097016, - "start": 1522066379.0011985 - }, - "modules-final": { - "errors": [], - "finished": 1522066383.56594, - "start": 1522066382.3449218 - }, - "stage": null - } - }' -''' - -import os - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text - - -CLOUD_INIT_PATH = "/var/lib/cloud/data" - - -def gather_cloud_init_data_facts(module): - res = { - 'cloud_init_data_facts': dict() - } - - for i in ['result', 'status']: - filter = module.params.get('filter') - if filter is None or filter == i: - res['cloud_init_data_facts'][i] = dict() - json_file = os.path.join(CLOUD_INIT_PATH, i + '.json') - - if os.path.exists(json_file): - f = open(json_file, 'rb') - contents = to_text(f.read(), errors='surrogate_or_strict') - f.close() - - if contents: - res['cloud_init_data_facts'][i] = module.from_json(contents) - return res - - -def main(): - module = AnsibleModule( - argument_spec=dict( - filter=dict(choices=['result', 'status']), - ), - supports_check_mode=True, - ) - - facts = gather_cloud_init_data_facts(module) - result = dict(changed=False, ansible_facts=facts, **facts) - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py deleted file mode 100644 index 662e8348..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py +++ /dev/null @@ -1,780 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: proxmox -short_description: management of instances in Proxmox VE cluster -description: - - allows you to create/delete/stop instances in Proxmox VE cluster - - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older) - - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior). -options: - password: - description: - - the instance root password - type: str - hostname: - description: - - the instance hostname - - required only for C(state=present) - - must be unique if vmid is not passed - type: str - ostemplate: - description: - - the template for VM creating - - required only for C(state=present) - type: str - disk: - description: - - This option was previously described as "hard disk size in GB for instance" however several formats describing - a lxc mount are permitted. - - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically - choose which storage to allocate from, however new versions enforce the C(:) syntax. 
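[Editor's note] gather_cloud_init_data_facts() above opens status.json/result.json without a context manager and shadows the built-in filter() with a local variable. An equivalent sketch using with (CLOUD_INIT_PATH as defined in the module; decoding and error handling simplified relative to module.from_json/to_text):

    import json
    import os

    CLOUD_INIT_PATH = '/var/lib/cloud/data'

    def read_cloud_init_json(name):
        """Parse /var/lib/cloud/data/<name>.json; return {} if missing or empty."""
        path = os.path.join(CLOUD_INIT_PATH, name + '.json')
        if not os.path.exists(path):
            return {}
        with open(path, 'rb') as f:
            contents = f.read().decode('utf-8', errors='replace')
        return json.loads(contents) if contents else {}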
- - "Additional options are available by using some combination of the following key-value pairs as a - comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] - [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." - - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(3). - type: str - cores: - description: - - Specify number of cores per socket. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - cpus: - description: - - numbers of allocated cpus for instance - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - memory: - description: - - memory size in MB for instance - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512). - type: int - swap: - description: - - swap memory size in MB for instance - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0). - type: int - netif: - description: - - specifies network interfaces for the container. As a hash/dictionary defining interfaces. - type: dict - features: - description: - - Specifies a list of features to be enabled. For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options). - - Some features require the use of a privileged container. - type: list - elements: str - version_added: 2.0.0 - mounts: - description: - - specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points - type: dict - ip_address: - description: - - specifies the address the container will be assigned - type: str - onboot: - description: - - specifies whether a VM will be started during system bootup - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - storage: - description: - - target storage - type: str - default: 'local' - cpuunits: - description: - - CPU weight for a VM - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000). - type: int - nameserver: - description: - - sets DNS server IP address for a container - type: str - searchdomain: - description: - - sets DNS search domain for a container - type: str - timeout: - description: - - timeout for operations - type: int - default: 30 - force: - description: - - forcing operations - - can be used only with states C(present), C(stopped), C(restarted) - - with C(state=present) force option allow to overwrite existing container - - with states C(stopped) , C(restarted) allow to force stop instance - type: bool - default: 'no' - purge: - description: - - Remove container from all related configurations. - - For example backup jobs, replication jobs, or HA. - - Related ACLs and Firewall entries will always be removed. - - Used with state C(absent). - type: bool - default: false - version_added: 2.3.0 - state: - description: - - Indicate desired state of the instance - type: str - choices: ['present', 'started', 'absent', 'stopped', 'restarted'] - default: present - pubkey: - description: - - Public key to add to /root/.ssh/authorized_keys. 
This was added on Proxmox 4.2, it is ignored for earlier versions - type: str - unprivileged: - description: - - Indicate if the container should be unprivileged - type: bool - default: 'no' - description: - description: - - Specify the description for the container. Only used on the configuration web interface. - - This is saved as a comment inside the configuration file. - type: str - version_added: '0.2.0' - hookscript: - description: - - Script that will be executed during various steps in the containers lifetime. - type: str - version_added: '0.2.0' - proxmox_default_behavior: - description: - - As of community.general 4.0.0, various options no longer have default values. - These default values caused problems when users expected different behavior from Proxmox - by default or filled options which caused problems when set. - - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values - are used when the values are not explicitly specified by the user. The new default is C(no_defaults), - which makes sure these options have no defaults. - - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options. - type: str - default: no_defaults - choices: - - compatibility - - no_defaults - version_added: "1.3.0" - clone: - description: - - ID of the container to be cloned. - - I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified. - - The type of clone created is defined by the I(clone_type) parameter. - - This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4). - type: int - version_added: 4.3.0 - clone_type: - description: - - Type of the clone created. - - C(full) creates a full clone, and I(storage) must be specified. - - C(linked) creates a linked clone, and the cloned container must be a template container. - - C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not. - I(storage) may be specified, if not it will fall back to the default. - type: str - choices: ['full', 'linked', 'opportunistic'] - default: opportunistic - version_added: 4.3.0 -author: Sergei Antipov (@UnderGreen) -extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.proxmox.selection -''' - -EXAMPLES = r''' -- name: Create new container with minimal options - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with hookscript and description - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - hookscript: 'local:snippets/vm_hook.sh' - description: created with ansible - -- name: Create new container automatically selecting the next available vmid. 
- community.general.proxmox: - node: 'uk-mc02' - api_user: 'root@pam' - api_password: '1q2w3e' - api_host: 'node1' - password: '123456' - hostname: 'example.org' - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with minimal options with force(it will rewrite existing container) - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - force: yes - -- name: Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with minimal options defining network interface with dhcp - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}' - -- name: Create new container with minimal options defining network interface with static ip - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}' - -- name: Create new container with minimal options defining a mount with 8GB - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - mounts: '{"mp0":"local:8,mp=/mnt/test/"}' - -- name: Create new container with minimal options defining a cpu core limit - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - cores: 2 - -- name: Create a new container with nesting enabled and allows the use of CIFS/NFS inside the container. - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - features: - - nesting=1 - - mount=cifs,nfs - -- name: > - Create a linked clone of the template container with id 100. The newly created container with be a - linked clone, because no storage parameter is defined - community.general.proxmox: - vmid: 201 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - clone: 100 - hostname: clone.example.org - -- name: Create a full clone of the container with id 100 - community.general.proxmox: - vmid: 201 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - clone: 100 - hostname: clone.example.org - storage: local - -- name: Start container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: started - -- name: > - Start container with mount. 
You should enter a 90-second timeout because servers - with additional disks take longer to boot - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: started - timeout: 90 - -- name: Stop container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: stopped - -- name: Stop container with force - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - force: yes - state: stopped - -- name: Restart container(stopped or mounted container you can't restart) - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: restarted - -- name: Remove container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: absent -''' - -import time -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible) - -VZ_TYPE = None - - -class ProxmoxLxcAnsible(ProxmoxAnsible): - def content_check(self, node, ostemplate, template_store): - return [True for cnt in self.proxmox_api.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate] - - def is_template_container(self, node, vmid): - """Check if the specified container is a template.""" - proxmox_node = self.proxmox_api.nodes(node) - config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get() - return config['template'] - - def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs): - proxmox_node = self.proxmox_api.nodes(node) - - # Remove all empty kwarg entries - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - - if VZ_TYPE == 'lxc': - kwargs['cpulimit'] = cpus - kwargs['rootfs'] = disk - if 'netif' in kwargs: - kwargs.update(kwargs['netif']) - del kwargs['netif'] - if 'mounts' in kwargs: - kwargs.update(kwargs['mounts']) - del kwargs['mounts'] - if 'pubkey' in kwargs: - if self.version() >= LooseVersion('4.2'): - kwargs['ssh-public-keys'] = kwargs['pubkey'] - del kwargs['pubkey'] - else: - kwargs['cpus'] = cpus - kwargs['disk'] = disk - - if clone is not None: - if VZ_TYPE != 'lxc': - self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.") - - clone_is_template = self.is_template_container(node, clone) - - # By default, create a full copy only when the cloned container is not a template. - create_full_copy = not clone_is_template - - # Only accept parameters that are compatible with the clone endpoint. - valid_clone_parameters = ['hostname', 'pool', 'description'] - if self.module.params['storage'] is not None and clone_is_template: - # Cloning a template, so create a full copy instead of a linked copy - create_full_copy = True - elif self.module.params['storage'] is None and not clone_is_template: - # Not cloning a template, but also no defined storage. This isn't possible. 
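[Editor's note] A compact summary, as a sketch rather than module code, of the clone_type branches that follow: it returns (make_full_copy, storage_allowed) given the requested clone type and whether the source container is a template. The module additionally upgrades an opportunistic clone of a template to a full copy when storage is explicitly set, which this sketch omits:

    def clone_strategy(clone_type, source_is_template):
        if clone_type == 'full':
            return True, True                 # always a full clone; storage may be given
        if clone_type == 'linked':
            if not source_is_template:
                raise ValueError('linked clones require a template container')
            return False, False
        # 'opportunistic': linked when the source is a template, full otherwise
        return (not source_is_template), (not source_is_template)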
- self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.") - - if self.module.params['clone_type'] == 'linked': - if not clone_is_template: - self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.") - # Don't need to do more, by default create_full_copy is set to false already - elif self.module.params['clone_type'] == 'opportunistic': - if not clone_is_template: - # Cloned container is not a template, so we need our 'storage' parameter - valid_clone_parameters.append('storage') - elif self.module.params['clone_type'] == 'full': - create_full_copy = True - valid_clone_parameters.append('storage') - - clone_parameters = {} - - if create_full_copy: - clone_parameters['full'] = '1' - else: - clone_parameters['full'] = '0' - for param in valid_clone_parameters: - if self.module.params[param] is not None: - clone_parameters[param] = self.module.params[param] - - taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters) - else: - taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs) - - while timeout: - if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and - proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def start_instance(self, vm, vmid, timeout): - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post() - while timeout: - if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and - self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def stop_instance(self, vm, vmid, timeout, force): - if force: - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) - else: - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post() - while timeout: - if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and - self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def umount_instance(self, vm, vmid, timeout): - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post() - while timeout: - if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and - self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for unmounting VM. 
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - proxmox_args = dict( - vmid=dict(type='int', required=False), - node=dict(), - pool=dict(), - password=dict(no_log=True), - hostname=dict(), - ostemplate=dict(), - disk=dict(type='str'), - cores=dict(type='int'), - cpus=dict(type='int'), - memory=dict(type='int'), - swap=dict(type='int'), - netif=dict(type='dict'), - mounts=dict(type='dict'), - ip_address=dict(), - onboot=dict(type='bool'), - features=dict(type='list', elements='str'), - storage=dict(default='local'), - cpuunits=dict(type='int'), - nameserver=dict(), - searchdomain=dict(), - timeout=dict(type='int', default=30), - force=dict(type='bool', default=False), - purge=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), - pubkey=dict(type='str', default=None), - unprivileged=dict(type='bool', default=False), - description=dict(type='str'), - hookscript=dict(type='str'), - proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - clone=dict(type='int'), - clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']), - ) - module_args.update(proxmox_args) - - module = AnsibleModule( - argument_spec=module_args, - required_if=[ - ('state', 'present', ['node', 'hostname']), - ('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we - # either clone a container or create a new one from a template file. - ], - required_together=[ - ('api_token_id', 'api_token_secret') - ], - required_one_of=[('api_password', 'api_token_id')], - mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template. 
- ) - - proxmox = ProxmoxLxcAnsible(module) - - global VZ_TYPE - VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc' - - state = module.params['state'] - vmid = module.params['vmid'] - node = module.params['node'] - disk = module.params['disk'] - cpus = module.params['cpus'] - memory = module.params['memory'] - swap = module.params['swap'] - storage = module.params['storage'] - hostname = module.params['hostname'] - if module.params['ostemplate'] is not None: - template_store = module.params['ostemplate'].split(":")[0] - timeout = module.params['timeout'] - clone = module.params['clone'] - - if module.params['proxmox_default_behavior'] == 'compatibility': - old_default_values = dict( - disk="3", - cores=1, - cpus=1, - memory=512, - swap=0, - onboot=False, - cpuunits=1000, - ) - for param, value in old_default_values.items(): - if module.params[param] is None: - module.params[param] = value - - # If vmid not set get the Next VM id from ProxmoxAPI - # If hostname is set get the VM id from ProxmoxAPI - if not vmid and state == 'present': - vmid = proxmox.get_nextvmid() - elif not vmid and hostname: - vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True) - elif not vmid: - module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) - - # Create a new container - if state == 'present' and clone is None: - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']: - module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) - # If no vmid was passed, there cannot be another VM named 'hostname' - if (not module.params['vmid'] and - proxmox.get_vmid(hostname, ignore_missing=True, choose_first_if_multiple=True) and - not module.params['force']): - vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True) - module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid)) - elif not proxmox.get_node(node): - module.fail_json(msg="node '%s' not exists in cluster" % node) - elif not proxmox.content_check(node, module.params['ostemplate'], template_store): - module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" - % (module.params['ostemplate'], node, template_store)) - except Exception as e: - module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e)) - - try: - proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone, - cores=module.params['cores'], - pool=module.params['pool'], - password=module.params['password'], - hostname=module.params['hostname'], - ostemplate=module.params['ostemplate'], - netif=module.params['netif'], - mounts=module.params['mounts'], - ip_address=module.params['ip_address'], - onboot=ansible_to_proxmox_bool(module.params['onboot']), - cpuunits=module.params['cpuunits'], - nameserver=module.params['nameserver'], - searchdomain=module.params['searchdomain'], - force=ansible_to_proxmox_bool(module.params['force']), - pubkey=module.params['pubkey'], - features=",".join(module.params['features']) if module.params['features'] is not None else None, - unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']), - description=module.params['description'], - hookscript=module.params['hookscript']) - - module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) - except Exception as e: - module.fail_json(msg="Creation of %s VM %s 
failed with exception: %s" % (VZ_TYPE, vmid, e)) - - # Clone a container - elif state == 'present' and clone is not None: - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']: - module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) - # If no vmid was passed, there cannot be another VM named 'hostname' - if (not module.params['vmid'] and - proxmox.get_vmid(hostname, ignore_missing=True, choose_first_if_multiple=True) and - not module.params['force']): - vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True) - module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid)) - if not proxmox.get_vm(clone, ignore_missing=True): - module.exit_json(changed=False, msg="Container to be cloned does not exist") - except Exception as e: - module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e)) - - try: - proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone) - - module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone)) - except Exception as e: - module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) - - elif state == 'started': - try: - vm = proxmox.get_vm(vmid) - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': - module.exit_json(changed=False, msg="VM %s is already running" % vmid) - - if proxmox.start_instance(vm, vmid, timeout): - module.exit_json(changed=True, msg="VM %s started" % vmid) - except Exception as e: - module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'stopped': - try: - vm = proxmox.get_vm(vmid) - - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': - if module.params['force']: - if proxmox.umount_instance(vm, vmid, timeout): - module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) - else: - module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. 
" - "You can use force option to umount it.") % vmid) - - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': - module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) - - if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']): - module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) - except Exception as e: - module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'restarted': - try: - vm = proxmox.get_vm(vmid) - - vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] - if vm_status in ['stopped', 'mounted']: - module.exit_json(changed=False, msg="VM %s is not running" % vmid) - - if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and - proxmox.start_instance(vm, vmid, timeout)): - module.exit_json(changed=True, msg="VM %s is restarted" % vmid) - except Exception as e: - module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'absent': - try: - vm = proxmox.get_vm(vmid, ignore_missing=True) - if not vm: - module.exit_json(changed=False, msg="VM %s does not exist" % vmid) - - vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] - if vm_status == 'running': - module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) - - if vm_status == 'mounted': - module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid) - - delete_params = {} - - if module.params['purge']: - delete_params['purge'] = 1 - - taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params) - - while timeout: - task_status = proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).status.get() - if (task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK'): - module.exit_json(changed=True, msg="VM %s removed" % vmid) - timeout -= 1 - if timeout == 0: - module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' - % proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - except Exception as e: - module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e))) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py deleted file mode 100644 index 675b04a4..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern (@tleguern) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_domain_info -short_description: Retrieve information about one or more Proxmox VE domains -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE domains. -options: - domain: - description: - - Restrict results to a specific authentication realm. 
- aliases: ['realm', 'name'] - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - - -EXAMPLES = ''' -- name: List existing domains - community.general.proxmox_domain_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_domains - -- name: Retrieve information about the pve domain - community.general.proxmox_domain_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - domain: pve - register: proxmox_domain_pve -''' - - -RETURN = ''' -proxmox_domains: - description: List of authentication domains. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the realm. - returned: on success - type: str - realm: - description: Realm name. - returned: on success - type: str - type: - description: Realm type. - returned: on success - type: str - digest: - description: Realm hash. - returned: on success, can be absent - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxDomainInfoAnsible(ProxmoxAnsible): - def get_domain(self, realm): - try: - domain = self.proxmox_api.access.domains.get(realm) - except Exception: - self.module.fail_json(msg="Domain '%s' does not exist" % realm) - domain['realm'] = realm - return domain - - def get_domains(self): - domains = self.proxmox_api.access.domains.get() - return domains - - -def proxmox_domain_info_argument_spec(): - return dict( - domain=dict(type='str', aliases=['realm', 'name']), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - domain_info_args = proxmox_domain_info_argument_spec() - module_args.update(domain_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxDomainInfoAnsible(module) - domain = module.params['domain'] - - if domain: - domains = [proxmox.get_domain(realm=domain)] - else: - domains = proxmox.get_domains() - result['proxmox_domains'] = domains - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py deleted file mode 100644 index 58b56e85..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_group_info -short_description: Retrieve information about one or more Proxmox VE groups -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE groups 
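[Editor's note] The create/start/stop/umount/delete code paths of the proxmox module above each repeat the same one-second polling loop over a task id. A consolidated sketch (proxmox_node and taskid as used there; unlike the loops above, this treats a task that stopped with a non-OK exit status as an immediate failure instead of polling until the timeout expires):

    import time

    def wait_for_task(proxmox_node, taskid, timeout):
        """Poll a PVE task once per second; True on OK, False on failure or timeout."""
        while timeout > 0:
            status = proxmox_node.tasks(taskid).status.get()
            if status['status'] == 'stopped':
                return status['exitstatus'] == 'OK'
            timeout -= 1
            time.sleep(1)
        return False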
-options: - group: - description: - - Restrict results to a specific group. - aliases: ['groupid', 'name'] - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - - -EXAMPLES = ''' -- name: List existing groups - community.general.proxmox_group_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_groups - -- name: Retrieve information about the admin group - community.general.proxmox_group_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - group: admin - register: proxmox_group_admin -''' - - -RETURN = ''' -proxmox_groups: - description: List of groups. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the group. - returned: on success, can be absent - type: str - groupid: - description: Group name. - returned: on success - type: str - users: - description: List of users in the group. - returned: on success - type: list - elements: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxGroupInfoAnsible(ProxmoxAnsible): - def get_group(self, groupid): - try: - group = self.proxmox_api.access.groups.get(groupid) - except Exception: - self.module.fail_json(msg="Group '%s' does not exist" % groupid) - group['groupid'] = groupid - return ProxmoxGroup(group) - - def get_groups(self): - groups = self.proxmox_api.access.groups.get() - return [ProxmoxGroup(group) for group in groups] - - -class ProxmoxGroup: - def __init__(self, group): - self.group = dict() - # Data representation is not the same depending on API calls - for k, v in group.items(): - if k == 'users' and isinstance(v, str): - self.group['users'] = v.split(',') - elif k == 'members': - self.group['users'] = group['members'] - else: - self.group[k] = v - - -def proxmox_group_info_argument_spec(): - return dict( - group=dict(type='str', aliases=['groupid', 'name']), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - group_info_args = proxmox_group_info_argument_spec() - module_args.update(group_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxGroupInfoAnsible(module) - group = module.params['group'] - - if group: - groups = [proxmox.get_group(groupid=group)] - else: - groups = proxmox.get_groups() - result['proxmox_groups'] = [group.group for group in groups] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py deleted file mode 100644 index 6bfb9e2e..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py +++ /dev/null @@ -1,1408 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2016, Abdoul Bah 
(@helldorado) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: proxmox_kvm -short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster. -description: - - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. - - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior). -author: "Abdoul Bah (@helldorado) " -options: - acpi: - description: - - Specify if ACPI should be enabled/disabled. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes). - type: bool - agent: - description: - - Specify if the QEMU Guest Agent should be enabled/disabled. - type: bool - args: - description: - - Pass arbitrary arguments to kvm. - - This option is for experts only! - - If I(proxmox_default_behavior) is set to C(compatiblity), this option has a default of - C(-serial unix:/var/run/qemu-server/.serial,server,nowait). - type: str - autostart: - description: - - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API). - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - balloon: - description: - - Specify the amount of RAM for the VM in MB. - - Using zero disables the balloon driver. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0). - type: int - bios: - description: - - Specify the BIOS implementation. - type: str - choices: ['seabios', 'ovmf'] - boot: - description: - - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n). - - You can combine to set order. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(cnd). - type: str - bootdisk: - description: - - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+) - type: str - cicustom: - description: - - 'cloud-init: Specify custom files to replace the automatically generated ones at start.' - type: str - version_added: 1.3.0 - cipassword: - description: - - 'cloud-init: password of default user to create.' - type: str - version_added: 1.3.0 - citype: - description: - - 'cloud-init: Specifies the cloud-init configuration format.' - - The default depends on the configured operating system type (C(ostype)). - - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows. - type: str - choices: ['nocloud', 'configdrive2'] - version_added: 1.3.0 - ciuser: - description: - - 'cloud-init: username of default user to create.' - type: str - version_added: 1.3.0 - clone: - description: - - Name of VM to be cloned. If C(vmid) is setted, C(clone) can take arbitrary value but required for initiating the clone. - type: str - cores: - description: - - Specify number of cores per socket. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - cpu: - description: - - Specify emulated CPU type. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(kvm64). - type: str - cpulimit: - description: - - Specify if CPU usage will be limited. Value 0 indicates no CPU limit. 
-      - If the computer has 2 CPUs, it has a total of '2' CPU time.
-    type: int
-  cpuunits:
-    description:
-      - Specify CPU weight for a VM.
-      - You can disable the fair-scheduler configuration by setting this to 0.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1000).
-    type: int
-  delete:
-    description:
-      - Specify a list of settings you want to delete.
-    type: str
-  description:
-    description:
-      - Specify the description for the VM. Only used on the configuration web interface.
-      - This is saved as a comment inside the configuration file.
-    type: str
-  digest:
-    description:
-      - Prevent changes if the current configuration file has a different SHA1 digest.
-      - This can be used to prevent concurrent modifications.
-    type: str
-  efidisk0:
-    description:
-      - Specify a hash/dictionary of EFI disk options.
-      - Requires I(bios=ovmf) to be set to be able to use it.
-    type: dict
-    suboptions:
-      storage:
-        description:
-          - C(storage) is the storage identifier where to create the disk.
-        type: str
-      format:
-        description:
-          - C(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide,
-            section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest
-            version, tables 3 to 14) to find out the formats supported by the provided storage backend.
-        type: str
-      efitype:
-        description:
-          - C(efitype) indicates the size of the EFI disk.
-          - C(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries.
-          - C(4m) will allow for a 4MB EFI disk, which will additionally allow storing EFI keys in order to enable
-            Secure Boot.
-        type: str
-        choices:
-          - 2m
-          - 4m
-      pre_enrolled_keys:
-        description:
-          - C(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled (C(1)) in the VM firmware
-            upon creation or not (C(0)).
-          - If set to C(1), Secure Boot will also be enabled by default when the VM is created.
-        type: bool
-        version_added: 4.5.0
-  force:
-    description:
-      - Allow forced stop of the VM.
-      - Can be used with states C(stopped), C(restarted) and C(absent).
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
-    type: bool
-  format:
-    description:
-      - Target drive's backing file's data format.
-      - Used only with clone.
-      - Use I(format=unspecified) and I(full=false) for a linked clone.
-      - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see
-        U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out the formats
-        supported by the provided storage backend.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(qcow2).
-        If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified).
-    type: str
-    choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
-  freeze:
-    description:
-      - Specify if PVE should freeze the CPU at startup (use the 'c' monitor command to start execution).
-    type: bool
-  full:
-    description:
-      - Create a full copy of all disks. This is always done when you clone a normal VM.
-      - For VM templates, we try to create a linked clone by default.
-      - Used only with clone.
-    type: bool
-    default: 'yes'
-  hostpci:
-    description:
-      - Specify a hash/dictionary of host PCI devices to map into the guest.
C(hostpci='{"key":"value", "key":"value"}'). - - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). - - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers). - - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model). - - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map. - - C(x-vga=boolean) I(default=0) Enable vfio-vga device support. - - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. - type: dict - hotplug: - description: - - Selectively enable hotplug features. - - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb'). - - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb'). - type: str - hugepages: - description: - - Enable/disable hugepages memory. - type: str - choices: ['any', '2', '1024'] - ide: - description: - - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}'). - - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for - the latest version, tables 3 to 14) to find out format supported by the provided storage backend. - type: dict - ipconfig: - description: - - 'cloud-init: Set the IP configuration.' - - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}'). - - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces. - - Values allowed are - C("[gw=] [,gw6=] [,ip=] [,ip6=]"). - - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.' - - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address. - - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided. - - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration. - - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4. - type: dict - version_added: 1.3.0 - keyboard: - description: - - Sets the keyboard layout for VNC server. - type: str - kvm: - description: - - Enable/disable KVM hardware virtualization. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes). - type: bool - localtime: - description: - - Sets the real time clock to local time. - - This is enabled by default if ostype indicates a Microsoft OS. - type: bool - lock: - description: - - Lock/unlock the VM. - type: str - choices: ['migrate', 'backup', 'snapshot', 'rollback'] - machine: - description: - - Specifies the Qemu machine type. - - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?)) - type: str - memory: - description: - - Memory size in MB for instance. 
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512). - type: int - migrate_downtime: - description: - - Sets maximum tolerated downtime (in seconds) for migrations. - type: int - migrate_speed: - description: - - Sets maximum speed (in MB/s) for migrations. - - A value of 0 is no limit. - type: int - name: - description: - - Specifies the VM name. Only used on the configuration web interface. - - Required only for C(state=present). - type: str - nameservers: - description: - - 'cloud-init: DNS server IP address(es).' - - If unset, PVE host settings are used. - type: list - elements: str - version_added: 1.3.0 - net: - description: - - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}'). - - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid""). - - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). - - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. - - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'. - - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'. - - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services. - type: dict - newid: - description: - - VMID for the clone. Used only with clone. - - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI. - type: int - numa: - description: - - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}'). - - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)""). - - C(cpus) CPUs accessing this NUMA node. - - C(hostnodes) Host NUMA nodes to use. - - C(memory) Amount of memory this NUMA node provides. - - C(policy) NUMA allocation policy. - type: dict - numa_enabled: - description: - - Enables NUMA. - type: bool - onboot: - description: - - Specifies whether a VM will be started during system bootup. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes). - type: bool - ostype: - description: - - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems. - - The l26 is Linux 2.6/3.X Kernel. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(l26). - type: str - choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris'] - parallel: - description: - - A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}'). - - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2. - - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+"). - type: dict - protection: - description: - - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations. - type: bool - reboot: - description: - - Allow reboot. If set to C(yes), the VM exit on reboot. 
-    type: bool
-  revert:
-    description:
-      - Revert a pending change.
-    type: str
-  sata:
-    description:
-      - A hash/dictionary of volumes used as SATA hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
-      - Values allowed are - C("storage:size,format=value").
-      - C(storage) is the storage identifier where to create the disk.
-      - C(size) is the size of the disk in GB.
-      - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
-        Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
-        the latest version, tables 3 to 14) to find out the formats supported by the provided storage backend.
-    type: dict
-  scsi:
-    description:
-      - A hash/dictionary of volumes used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
-      - Values allowed are - C("storage:size,format=value").
-      - C(storage) is the storage identifier where to create the disk.
-      - C(size) is the size of the disk in GB.
-      - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
-        Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
-        the latest version, tables 3 to 14) to find out the formats supported by the provided storage backend.
-    type: dict
-  scsihw:
-    description:
-      - Specifies the SCSI controller model.
-    type: str
-    choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
-  searchdomains:
-    description:
-      - 'cloud-init: Sets DNS search domain(s).'
-      - If unset, PVE host settings are used.
-    type: list
-    elements: str
-    version_added: 1.3.0
-  serial:
-    description:
-      - A hash/dictionary of serial devices to create inside the VM. C('{"key":"value", "key":"value"}').
-      - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
-      - Values allowed are - C((/dev/.+|socket)).
-      - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
-    type: dict
-  shares:
-    description:
-      - Sets the amount of memory shares for auto-ballooning (0 - 50000).
-      - The larger the number is, the more memory this VM gets.
-      - The number is relative to the weights of all other running VMs.
-      - Using 0 disables auto-ballooning, which means no limit.
-    type: int
-  skiplock:
-    description:
-      - Ignore locks.
-      - Only root is allowed to use this option.
-    type: bool
-  smbios:
-    description:
-      - Specifies SMBIOS type 1 fields.
-    type: str
-  snapname:
-    description:
-      - The name of the snapshot. Used only with clone.
-    type: str
-  sockets:
-    description:
-      - Sets the number of CPU sockets (1 - N).
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
-    type: int
-  sshkeys:
-    description:
-      - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
-    type: str
-    version_added: 1.3.0
-  startdate:
-    description:
-      - Sets the initial date of the real time clock.
-      - Valid formats for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
-    type: str
-  startup:
-    description:
-      - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
-      - Order is a non-negative number defining the general startup order.
-      - Shutdown is done with reverse ordering.
-    type: str
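-  # Illustrative note (an assumption, not upstream documentation): a startup value such as
-  # C(order=1,up=30) would start this VM first and wait 30 seconds before the next VM is
-  # started; C(down) adds an analogous delay on shutdown.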
-  state:
-    description:
-      - Indicates the desired state of the instance.
-      - If C(current), the current state of the VM will be fetched. You can access it with C(results.status).
-    type: str
-    choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current']
-    default: present
-  storage:
-    description:
-      - Target storage for full clone.
-    type: str
-  tablet:
-    description:
-      - Enables/disables the USB tablet device.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
-    type: bool
-  tags:
-    description:
-      - List of tags to apply to the VM instance.
-      - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]).
-      - Tags are only available in Proxmox 6+.
-    type: list
-    elements: str
-    version_added: 2.3.0
-  target:
-    description:
-      - Target node. Only allowed if the original VM is on shared storage.
-      - Used only with clone.
-    type: str
-  tdf:
-    description:
-      - Enables/disables time drift fix.
-    type: bool
-  template:
-    description:
-      - Enables/disables the template.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
-    type: bool
-  timeout:
-    description:
-      - Timeout for operations.
-    type: int
-    default: 30
-  update:
-    description:
-      - If C(yes), the VM will be updated with the new values.
-      - Because of how the API operates and for security reasons, updating the following parameters is disabled -
-        C(net, virtio, ide, sata, scsi). For example, updating C(net) would change the MAC address and C(virtio) would always create a new disk.
-      - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module.
-    type: bool
-    default: 'no'
-  vcpus:
-    description:
-      - Sets the number of hotplugged vcpus.
-    type: int
-  vga:
-    description:
-      - Select the VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(std).
-    type: str
-    choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
-  virtio:
-    description:
-      - A hash/dictionary of volumes used as VIRTIO hard disks. C(virtio='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
-      - Values allowed are - C("storage:size,format=value").
-      - C(storage) is the storage identifier where to create the disk.
-      - C(size) is the size of the disk in GB.
-      - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
-        Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html)
-        for the latest version, tables 3 to 14) to find out the formats supported by the provided storage backend.
-    type: dict
-  watchdog:
-    description:
-      - Creates a virtual hardware watchdog device.
-    type: str
-  proxmox_default_behavior:
-    description:
-      - As of community.general 4.0.0, various options no longer have default values.
-        These default values caused problems when users expected different behavior from Proxmox
-        by default or set options which then caused problems.
-      - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
-        are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
-        which makes sure these options have no defaults.
-      - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
-        I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
-        I(tablet), I(template), and I(vga) options.
-    type: str
-    default: no_defaults
-    choices:
-      - compatibility
-      - no_defaults
-    version_added: "1.3.0"
-extends_documentation_fragment:
-  - community.general.proxmox.documentation
-  - community.general.proxmox.selection
-'''
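-# A minimal sketch of what I(proxmox_default_behavior) changes (illustrative, see the
-# option documentation above): with C(compatibility), omitting C(memory) sends the former
-# module default of 512 to the API; with C(no_defaults), the option is simply not sent and
-# Proxmox VE applies its own server-side default.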
-
-EXAMPLES = '''
-- name: Create new VM with minimal options
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-
-- name: Create new VM with minimal options and given vmid
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    vmid: 100
-
-- name: Create new VM with two network interface options
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    net:
-      net0: 'virtio,bridge=vmbr1,rate=200'
-      net1: 'e1000,bridge=vmbr2'
-
-- name: Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    net:
-      net0: 'virtio,bridge=vmbr1,rate=200'
-    virtio:
-      virtio0: 'VMs_LVM:10'
-      virtio1: 'VMs:2,format=qcow2'
-      virtio2: 'VMs:5,format=raw'
-    cores: 4
-    vcpus: 2
-
-- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot disabled by default
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    sata:
-      sata0: 'VMs_LVM:10,format=raw'
-    bios: ovmf
-    efidisk0:
-      storage: VMs_LVM_thin
-      format: raw
-      efitype: 4m
-      pre_enrolled_keys: False
-
-- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot enabled by default
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    sata:
-      sata0: 'VMs_LVM:10,format=raw'
-    bios: ovmf
-    efidisk0:
-      storage: VMs_LVM
-      format: raw
-      efitype: 4m
-      pre_enrolled_keys: 1
-
-- name: >
-    Clone VM with only source VM name.
-    The VM source is spynal.
-    The target VM name is zavala
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    clone: spynal
-    name: zavala
-    node: sabrewulf
-    storage: VMs
-    format: qcow2
-    timeout: 500
-
-- name: >
-    Create linked clone VM with only source VM name.
-    The VM source is spynal.
- The target VM name is zavala - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: spynal - name: zavala - node: sabrewulf - storage: VMs - full: no - format: unspecified - timeout: 500 - -- name: Clone VM with source vmid and target newid and raw format - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: arbitrary_name - vmid: 108 - newid: 152 - name: zavala - node: sabrewulf - storage: LVM_STO - format: raw - timeout: 300 - -- name: Create new VM and lock it for snapshot - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - lock: snapshot - -- name: Create new VM and set protection to disable the remove VM and remove disk operations - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - protection: yes - -- name: Create new VM using cloud-init with a username and password - community.general.proxmox_kvm: - node: sabrewulf - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - ide: - ide2: 'local:cloudinit,format=qcow2' - ciuser: mylinuxuser - cipassword: supersecret - searchdomains: 'mydomain.internal' - nameservers: 1.1.1.1 - net: - net0: 'virtio,bridge=vmbr1,tag=77' - ipconfig: - ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1' - -- name: Create new VM using Cloud-Init with an ssh key - community.general.proxmox_kvm: - node: sabrewulf - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - ide: - ide2: 'local:cloudinit,format=qcow2' - sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+' - searchdomains: 'mydomain.internal' - nameservers: - - '1.1.1.1' - - '8.8.8.8' - net: - net0: 'virtio,bridge=vmbr1,tag=77' - ipconfig: - ipconfig0: 'ip=192.168.1.1/24' - -- name: Start VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: started - -- name: Stop VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: stopped - -- name: Stop VM with force - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: stopped - force: yes - -- name: Restart VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: restarted - -- name: Remove VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: absent - -- name: Get VM current state - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: current - -- name: Update VM configuration - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - cores: 8 - memory: 16384 - update: yes - -- name: Delete QEMU parameters - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - delete: 'args,template,cpulimit' - -- name: Revert a pending change - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado 
-    name: spynal
-    node: sabrewulf
-    revert: 'template,cpulimit'
-'''
-
-RETURN = '''
-vmid:
-  description: The VM vmid.
-  returned: success
-  type: int
-  sample: 115
-status:
-  description: The current virtual machine status.
-  returned: success, not clone, not absent, not update
-  type: str
-  sample: running
-msg:
-  description: A short message.
-  returned: always
-  type: str
-  sample: "VM kropta with vmid = 110 is running"
-'''
-
-import re
-import time
-import traceback
-from ansible.module_utils.six.moves.urllib.parse import quote
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-
-def parse_mac(netstr):
-    return re.search('=(.*?),', netstr).group(1)
-
-
-def parse_dev(devstr):
-    return re.search('(.*?)(,|$)', devstr).group(1)
-
-
-class ProxmoxKvmAnsible(ProxmoxAnsible):
-    def get_vminfo(self, node, vmid, **kwargs):
-        global results
-        results = {}
-        mac = {}
-        devices = {}
-        try:
-            vm = self.proxmox_api.nodes(node).qemu(vmid).config.get()
-        except Exception as e:
-            self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
-
-        # Sanitize kwargs. Remove not defined args and ensure True and False are converted to int.
-        kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-
-        # Convert all dicts in kwargs to elements.
-        # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
-        for k in list(kwargs.keys()):
-            if isinstance(kwargs[k], dict):
-                kwargs.update(kwargs[k])
-                del kwargs[k]
-
-        # Split information by type
-        re_net = re.compile(r'net[0-9]')
-        re_dev = re.compile(r'(virtio|ide|scsi|sata|efidisk)[0-9]')
-        for k in kwargs.keys():
-            if re_net.match(k):
-                mac[k] = parse_mac(vm[k])
-            elif re_dev.match(k):
-                devices[k] = parse_dev(vm[k])
-
-        results['mac'] = mac
-        results['devices'] = devices
-        results['vmid'] = int(vmid)
-
-    def settings(self, vmid, node, **kwargs):
-        proxmox_node = self.proxmox_api.nodes(node)
-
-        # Sanitize kwargs. Remove not defined args and ensure True and False are converted to int.
-        kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-
-        return proxmox_node.qemu(vmid).config.set(**kwargs) is None
-
-    def wait_for_task(self, node, taskid):
-        timeout = self.module.params['timeout']
-
-        while timeout:
-            task = self.proxmox_api.nodes(node).tasks(taskid).status.get()
-            if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
-                # Wait an extra second as the API can be ahead of the hypervisor
-                time.sleep(1)
-                return True
-            timeout = timeout - 1
-            if timeout == 0:
-                break
-            time.sleep(1)
-        return False
-
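-    # Assumed typical call pattern for wait_for_task (it mirrors start_vm/stop_vm below):
-    #   taskid = proxmox_node.qemu(vmid).status.start.post()
-    #   if not self.wait_for_task(vm['node'], taskid):
-    #       ...report a timeout...
-    # The method polls the task once per second for up to module.params['timeout'] seconds
-    # and returns True only when the task reaches status 'stopped' with exitstatus 'OK'.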
-    def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
-        # Available only in PVE 4
-        only_v4 = ['force', 'protection', 'skiplock']
-        only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
-
-        # valid clone parameters
-        valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
-        clone_params = {}
-        # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
-        vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
-
-        proxmox_node = self.proxmox_api.nodes(node)
-
-        # Sanitize kwargs. Remove not defined args and ensure True and False are converted to int.
-        kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-        kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))
-
-        version = self.version()
-        pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
-
-        # These features work only on PVE 4+
-        if pve_major_version < 4:
-            for p in only_v4:
-                if p in kwargs:
-                    del kwargs[p]
-
-        # These features work only on PVE 6+
-        if pve_major_version < 6:
-            for p in only_v6:
-                if p in kwargs:
-                    del kwargs[p]
-
-        # The 'sshkeys' param expects a URL-encoded string
-        if 'sshkeys' in kwargs:
-            urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
-            kwargs['sshkeys'] = str(urlencoded_ssh_keys)
-
-        # If update, don't update disks (virtio, efidisk0, ide, sata, scsi) and network interfaces.
-        # The pool parameter is not supported by the qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create".
-        if update:
-            if 'virtio' in kwargs:
-                del kwargs['virtio']
-            if 'sata' in kwargs:
-                del kwargs['sata']
-            if 'scsi' in kwargs:
-                del kwargs['scsi']
-            if 'ide' in kwargs:
-                del kwargs['ide']
-            if 'efidisk0' in kwargs:
-                del kwargs['efidisk0']
-            if 'net' in kwargs:
-                del kwargs['net']
-            if 'force' in kwargs:
-                del kwargs['force']
-            if 'pool' in kwargs:
-                del kwargs['pool']
-
-        # Check that the bios option is set to ovmf if the efidisk0 option is present
-        if 'efidisk0' in kwargs:
-            if ('bios' not in kwargs) or ('ovmf' != kwargs['bios']):
-                self.module.fail_json(msg='efidisk0 cannot be used if bios is not set to ovmf.')
-
-        # Flatten the efidisk0 option to a string, which is what Proxmoxer and the API expect
-        if 'efidisk0' in kwargs:
-            efidisk0_str = ''
-            # Regexp to catch underscores in key names, to replace them afterwards with hyphens
-            hyphen_re = re.compile(r'_')
-            # If present, the storage definition should be the first argument
-            if 'storage' in kwargs['efidisk0']:
-                efidisk0_str += kwargs['efidisk0'].get('storage') + ':1,'
-                kwargs['efidisk0'].pop('storage')
-            # Join other elements from the dict as key=value using commas as separator, replacing any underscore in a key
-            # with a hyphen (needed to turn pre_enrolled_keys into pre-enrolled-keys)
-            efidisk0_str += ','.join([hyphen_re.sub('-', k) + "=" + str(v) for k, v in kwargs['efidisk0'].items()
-                                      if 'storage' != k])
-            kwargs['efidisk0'] = efidisk0_str
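-        # Illustrative example (assumed values, not executed): an efidisk0 dict such as
-        #   {'storage': 'VMs_LVM', 'format': 'raw', 'efitype': '4m', 'pre_enrolled_keys': 0}
-        # is flattened above into the string
-        #   'VMs_LVM:1,format=raw,efitype=4m,pre-enrolled-keys=0'
-        # which is the form Proxmoxer and the PVE API expect.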
-
-        # Convert all dicts in kwargs to elements.
-        # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
-        for k in list(kwargs.keys()):
-            if isinstance(kwargs[k], dict):
-                kwargs.update(kwargs[k])
-                del kwargs[k]
-
-        # Rename numa_enabled to numa, according to the API documentation
-        if 'numa_enabled' in kwargs:
-            kwargs['numa'] = kwargs['numa_enabled']
-            del kwargs['numa_enabled']
-
-        # The PVE API expects strings for the following params
-        if 'nameservers' in self.module.params:
-            nameservers = self.module.params.pop('nameservers')
-            if nameservers:
-                kwargs['nameserver'] = ' '.join(nameservers)
-        if 'searchdomains' in self.module.params:
-            searchdomains = self.module.params.pop('searchdomains')
-            if searchdomains:
-                kwargs['searchdomain'] = ' '.join(searchdomains)
-
-        # VM tags are expected to be valid and presented as a comma/semicolon delimited string
-        if 'tags' in kwargs:
-            re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$')
-            for tag in kwargs['tags']:
-                if not re_tag.match(tag):
-                    self.module.fail_json(msg='%s is not a valid tag' % tag)
-            kwargs['tags'] = ",".join(kwargs['tags'])
-
-        # -args and skiplock require the root@pam user - they cannot be used with API tokens
-        if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is None:
-            if not update and self.module.params['proxmox_default_behavior'] == 'compatibility':
-                kwargs['args'] = vm_args
-        elif self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None:
-            kwargs['args'] = self.module.params['args']
-        elif self.module.params['api_user'] != "root@pam" and self.module.params['args'] is not None:
-            self.module.fail_json(msg='The args parameter requires the root@pam user.')
-
-        if self.module.params['api_user'] != "root@pam" and self.module.params['skiplock'] is not None:
-            self.module.fail_json(msg='The skiplock parameter requires the root@pam user.')
-
-        if update:
-            if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
-                return True
-            else:
-                return False
-        elif self.module.params['clone'] is not None:
-            for param in valid_clone_params:
-                if self.module.params[param] is not None:
-                    clone_params[param] = self.module.params[param]
-            clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
-            taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
-        else:
-            taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
-
-        if not self.wait_for_task(node, taskid):
-            self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
-                                  proxmox_node.tasks(taskid).log.get()[:1])
-            return False
-        return True
-
-    def start_vm(self, vm):
-        vmid = vm['vmid']
-        proxmox_node = self.proxmox_api.nodes(vm['node'])
-        taskid = proxmox_node.qemu(vmid).status.start.post()
-        if not self.wait_for_task(vm['node'], taskid):
-            self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
-                                  proxmox_node.tasks(taskid).log.get()[:1])
-            return False
-        return True
-
-    def stop_vm(self, vm, force):
-        vmid = vm['vmid']
-        proxmox_node = self.proxmox_api.nodes(vm['node'])
-        taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
-        if not self.wait_for_task(vm['node'], taskid):
-            self.module.fail_json(msg='Reached timeout while waiting for stopping VM.
Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - -def main(): - module_args = proxmox_auth_argument_spec() - kvm_args = dict( - acpi=dict(type='bool'), - agent=dict(type='bool'), - args=dict(type='str'), - autostart=dict(type='bool'), - balloon=dict(type='int'), - bios=dict(choices=['seabios', 'ovmf']), - boot=dict(type='str'), - bootdisk=dict(type='str'), - cicustom=dict(type='str'), - cipassword=dict(type='str', no_log=True), - citype=dict(type='str', choices=['nocloud', 'configdrive2']), - ciuser=dict(type='str'), - clone=dict(type='str'), - cores=dict(type='int'), - cpu=dict(type='str'), - cpulimit=dict(type='int'), - cpuunits=dict(type='int'), - delete=dict(type='str'), - description=dict(type='str'), - digest=dict(type='str'), - efidisk0=dict(type='dict', - options=dict( - storage=dict(type='str'), - format=dict(type='str'), - efitype=dict(type='str', choices=['2m', '4m']), - pre_enrolled_keys=dict(type='bool'), - )), - force=dict(type='bool'), - format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']), - freeze=dict(type='bool'), - full=dict(type='bool', default=True), - hostpci=dict(type='dict'), - hotplug=dict(type='str'), - hugepages=dict(choices=['any', '2', '1024']), - ide=dict(type='dict'), - ipconfig=dict(type='dict'), - keyboard=dict(type='str'), - kvm=dict(type='bool'), - localtime=dict(type='bool'), - lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']), - machine=dict(type='str'), - memory=dict(type='int'), - migrate_downtime=dict(type='int'), - migrate_speed=dict(type='int'), - name=dict(type='str'), - nameservers=dict(type='list', elements='str'), - net=dict(type='dict'), - newid=dict(type='int'), - node=dict(), - numa=dict(type='dict'), - numa_enabled=dict(type='bool'), - onboot=dict(type='bool'), - ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']), - parallel=dict(type='dict'), - pool=dict(type='str'), - protection=dict(type='bool'), - reboot=dict(type='bool'), - revert=dict(type='str'), - sata=dict(type='dict'), - scsi=dict(type='dict'), - scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']), - serial=dict(type='dict'), - searchdomains=dict(type='list', elements='str'), - shares=dict(type='int'), - skiplock=dict(type='bool'), - smbios=dict(type='str'), - snapname=dict(type='str'), - sockets=dict(type='int'), - sshkeys=dict(type='str', no_log=False), - startdate=dict(type='str'), - startup=dict(), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), - storage=dict(type='str'), - tablet=dict(type='bool'), - tags=dict(type='list', elements='str'), - target=dict(type='str'), - tdf=dict(type='bool'), - template=dict(type='bool'), - timeout=dict(type='int', default=30), - update=dict(type='bool', default=False), - vcpus=dict(type='int'), - vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), - virtio=dict(type='dict'), - vmid=dict(type='int'), - watchdog=dict(), - proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - ) - module_args.update(kvm_args) - - module = AnsibleModule( - argument_spec=module_args, - mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), 
-                            ('clone', 'revert')],
-        required_together=[('api_token_id', 'api_token_secret')],
-        required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
-        required_if=[('state', 'present', ['node'])],
-    )
-
-    clone = module.params['clone']
-    cpu = module.params['cpu']
-    cores = module.params['cores']
-    delete = module.params['delete']
-    memory = module.params['memory']
-    name = module.params['name']
-    newid = module.params['newid']
-    node = module.params['node']
-    revert = module.params['revert']
-    sockets = module.params['sockets']
-    state = module.params['state']
-    update = bool(module.params['update'])
-    vmid = module.params['vmid']
-    validate_certs = module.params['validate_certs']
-
-    if module.params['proxmox_default_behavior'] == 'compatibility':
-        old_default_values = dict(
-            acpi=True,
-            autostart=False,
-            balloon=0,
-            boot='cnd',
-            cores=1,
-            cpu='kvm64',
-            cpuunits=1000,
-            format='qcow2',
-            kvm=True,
-            memory=512,
-            ostype='l26',
-            sockets=1,
-            tablet=False,
-            template=False,
-            vga='std',
-        )
-        for param, value in old_default_values.items():
-            if module.params[param] is None:
-                module.params[param] = value
-
-    if module.params['format'] == 'unspecified':
-        module.params['format'] = None
-
-    proxmox = ProxmoxKvmAnsible(module)
-
-    # If vmid is not defined then retrieve its value from the vm name,
-    # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
-    if not vmid:
-        if state == 'present' and not update and not clone and not delete and not revert:
-            try:
-                vmid = proxmox.get_nextvmid()
-            except Exception:
-                module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
-        else:
-            clone_target = clone or name
-            vmid = proxmox.get_vmid(clone_target, ignore_missing=True, choose_first_if_multiple=True)
-
-    if clone is not None:
-        # If newid is not defined then retrieve the next free id from ProxmoxAPI
-        if not newid:
-            try:
-                newid = proxmox.get_nextvmid()
-            except Exception:
-                module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
-
-        # Ensure the source VM name exists when cloning
-        if not vmid:
-            module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
-
-        # Ensure the source VM id exists when cloning
-        proxmox.get_vm(vmid)
-
-        # Ensure the chosen VM name doesn't already exist when cloning
-        existing_vmid = proxmox.get_vmid(name, ignore_missing=True, choose_first_if_multiple=True)
-        if existing_vmid:
-            module.exit_json(changed=False, vmid=existing_vmid, msg="VM with name <%s> already exists" % name)
-
-        # Ensure the chosen VM id doesn't already exist when cloning
-        if proxmox.get_vm(newid, ignore_missing=True):
-            module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name))
-
-    if delete is not None:
-        try:
-            proxmox.settings(vmid, node, delete=delete)
-            module.exit_json(changed=True, vmid=vmid, msg="Settings deleted on VM {0} with vmid {1}".format(name, vmid))
-        except Exception as e:
-            module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
-
-    if revert is not None:
-        try:
-            proxmox.settings(vmid, node, revert=revert)
-            module.exit_json(changed=True, vmid=vmid, msg="Settings reverted on VM {0} with vmid {1}".format(name, vmid))
-        except Exception as e:
-            module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... 
'.format(name, vmid) + str(e)) - - if state == 'present': - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not (update or clone): - module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) - elif proxmox.get_vmid(name, ignore_missing=True, choose_first_if_multiple=True) and not (update or clone): - module.exit_json(changed=False, vmid=proxmox.get_vmid(name, choose_first_if_multiple=True), msg="VM with name <%s> already exists" % name) - elif not (node, name): - module.fail_json(msg='node, name is mandatory for creating/updating vm') - elif not proxmox.get_node(node): - module.fail_json(msg="node '%s' does not exist in cluster" % node) - - proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update, - acpi=module.params['acpi'], - agent=module.params['agent'], - autostart=module.params['autostart'], - balloon=module.params['balloon'], - bios=module.params['bios'], - boot=module.params['boot'], - bootdisk=module.params['bootdisk'], - cicustom=module.params['cicustom'], - cipassword=module.params['cipassword'], - citype=module.params['citype'], - ciuser=module.params['ciuser'], - cpulimit=module.params['cpulimit'], - cpuunits=module.params['cpuunits'], - description=module.params['description'], - digest=module.params['digest'], - efidisk0=module.params['efidisk0'], - force=module.params['force'], - freeze=module.params['freeze'], - hostpci=module.params['hostpci'], - hotplug=module.params['hotplug'], - hugepages=module.params['hugepages'], - ide=module.params['ide'], - ipconfig=module.params['ipconfig'], - keyboard=module.params['keyboard'], - kvm=module.params['kvm'], - localtime=module.params['localtime'], - lock=module.params['lock'], - machine=module.params['machine'], - migrate_downtime=module.params['migrate_downtime'], - migrate_speed=module.params['migrate_speed'], - net=module.params['net'], - numa=module.params['numa'], - numa_enabled=module.params['numa_enabled'], - onboot=module.params['onboot'], - ostype=module.params['ostype'], - parallel=module.params['parallel'], - pool=module.params['pool'], - protection=module.params['protection'], - reboot=module.params['reboot'], - sata=module.params['sata'], - scsi=module.params['scsi'], - scsihw=module.params['scsihw'], - serial=module.params['serial'], - shares=module.params['shares'], - skiplock=module.params['skiplock'], - smbios1=module.params['smbios'], - snapname=module.params['snapname'], - sshkeys=module.params['sshkeys'], - startdate=module.params['startdate'], - startup=module.params['startup'], - tablet=module.params['tablet'], - tags=module.params['tags'], - target=module.params['target'], - tdf=module.params['tdf'], - template=module.params['template'], - vcpus=module.params['vcpus'], - vga=module.params['vga'], - virtio=module.params['virtio'], - watchdog=module.params['watchdog']) - - if not clone: - proxmox.get_vminfo(node, vmid, - ide=module.params['ide'], - net=module.params['net'], - sata=module.params['sata'], - scsi=module.params['scsi'], - virtio=module.params['virtio']) - if update: - module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) - elif clone is not None: - module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) - else: - module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) - except Exception as e: - if update: - module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + 
str(e)) - elif clone is not None: - module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e)) - else: - module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) - - elif state == 'started': - status = {} - try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = proxmox.get_vm(vmid) - status['status'] = vm['status'] - if vm['status'] == 'running': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status) - - if proxmox.start_vm(vm): - module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'stopped': - status = {} - try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - vm = proxmox.get_vm(vmid) - - status['status'] = vm['status'] - if vm['status'] == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status) - - if proxmox.stop_vm(vm, force=module.params['force']): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'restarted': - status = {} - try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - vm = proxmox.get_vm(vmid) - status['status'] = vm['status'] - if vm['status'] == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status) - - if proxmox.stop_vm(vm, force=module.params['force']) and proxmox.start_vm(vm): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'absent': - status = {} - try: - vm = proxmox.get_vm(vmid, ignore_missing=True) - if not vm: - module.exit_json(changed=False, vmid=vmid) - - proxmox_node = proxmox.proxmox_api.nodes(vm['node']) - status['status'] = vm['status'] - if vm['status'] == 'running': - if module.params['force']: - proxmox.stop_vm(vm, True) - else: - module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=yes." % vmid) - taskid = proxmox_node.qemu.delete(vmid) - if not proxmox.wait_for_task(vm['node'], taskid): - module.fail_json(msg='Reached timeout while waiting for removing VM. 
Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - else: - module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid) - except Exception as e: - module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'current': - status = {} - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = proxmox.get_vm(vmid) - if not name: - name = vm['name'] - current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status'] - status['status'] = current - if status: - module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_nic.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_nic.py deleted file mode 100644 index e83d0dfe..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_nic.py +++ /dev/null @@ -1,304 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2021, Lammert Hellinga (@Kogelvis) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: proxmox_nic -short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster. -version_added: 3.1.0 -description: - - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster. -author: "Lammert Hellinga (@Kogelvis) " -options: - bridge: - description: - - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0). - type: str - firewall: - description: - - Whether this interface should be protected by the firewall. - type: bool - default: false - interface: - description: - - Name of the interface, should be C(net[n]) where C(1 ≤ n ≤ 31). - type: str - required: true - link_down: - description: - - Whether this interface should be disconnected (like pulling the plug). - type: bool - default: false - mac: - description: - - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified. - - When not specified this module will keep the MAC address the same when changing an existing interface. - type: str - model: - description: - - The NIC emulator model. - type: str - choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', - 'rtl8139', 'virtio', 'vmxnet3'] - default: virtio - mtu: - description: - - Force MTU, for C(virtio) model only, setting will be ignored otherwise. - - Set to C(1) to use the bridge MTU. - - Value should be C(1 ≤ n ≤ 65520). - type: int - name: - description: - - Specifies the VM name. Only used on the configuration web interface. - - Required only for I(state=present). - type: str - queues: - description: - - Number of packet queues to be used on the device. - - Value should be C(0 ≤ n ≤ 16). - type: int - rate: - description: - - Rate limit in MBps (MegaBytes per second) as floating point number. - type: float - state: - description: - - Indicates desired state of the NIC. - type: str - choices: ['present', 'absent'] - default: present - tag: - description: - - VLAN tag to apply to packets on this interface. - - Value should be C(1 ≤ n ≤ 4094). 
- type: int - trunks: - description: - - List of VLAN trunks to pass through this interface. - type: list - elements: int - vmid: - description: - - Specifies the instance ID. - type: int -extends_documentation_fragment: - - community.general.proxmox.documentation -''' - -EXAMPLES = ''' -- name: Create NIC net0 targeting the vm by name - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - name: my_vm - interface: net0 - bridge: vmbr0 - tag: 3 - -- name: Create NIC net0 targeting the vm by id - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - vmid: 103 - interface: net0 - bridge: vmbr0 - mac: "12:34:56:C0:FF:EE" - firewall: true - -- name: Delete NIC net0 targeting the vm by name - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - name: my_vm - interface: net0 - state: absent -''' - -RETURN = ''' -vmid: - description: The VM vmid. - returned: success - type: int - sample: 115 -msg: - description: A short message - returned: always - type: str - sample: "Nic net0 unchanged on VM with vmid 103" -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxNicAnsible(ProxmoxAnsible): - def update_nic(self, vmid, interface, model, **kwargs): - vm = self.get_vm(vmid) - - try: - vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() - except Exception as e: - self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) - - if interface in vminfo: - # Convert the current config to a dictionary - config = vminfo[interface].split(',') - config.sort() - - config_current = {} - - for i in config: - kv = i.split('=') - try: - config_current[kv[0]] = kv[1] - except IndexError: - config_current[kv[0]] = '' - - # determine the current model nic and mac-address - models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', - 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3'] - current_model = set(models) & set(config_current.keys()) - current_model = current_model.pop() - current_mac = config_current[current_model] - - # build nic config string - config_provided = "{0}={1}".format(model, current_mac) - else: - config_provided = model - - if kwargs['mac']: - config_provided = "{0}={1}".format(model, kwargs['mac']) - - if kwargs['bridge']: - config_provided += ",bridge={0}".format(kwargs['bridge']) - - if kwargs['firewall']: - config_provided += ",firewall=1" - - if kwargs['link_down']: - config_provided += ',link_down=1' - - if kwargs['mtu']: - config_provided += ",mtu={0}".format(kwargs['mtu']) - if model != 'virtio': - self.module.warn( - 'Ignoring MTU for nic {0} on VM with vmid {1}, ' - 'model should be set to \'virtio\': '.format(interface, vmid)) - - if kwargs['queues']: - config_provided += ",queues={0}".format(kwargs['queues']) - - if kwargs['rate']: - config_provided += ",rate={0}".format(kwargs['rate']) - - if kwargs['tag']: - config_provided += ",tag={0}".format(kwargs['tag']) - - if kwargs['trunks']: - config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks'])) - - net = {interface: config_provided} - vm = self.get_vm(vmid) - - if ((interface not in vminfo) or (vminfo[interface] != config_provided)): - if not self.module.check_mode: - 
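-                # config_provided is the PVE NIC definition string assembled above; an
-                # illustrative (assumed) value: 'virtio=12:34:56:C0:FF:EE,bridge=vmbr0,firewall=1'.
-                # It is only pushed to the API when it differs from the current config and
-                # check mode is off.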
self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**net) - return True - - return False - - def delete_nic(self, vmid, interface): - vm = self.get_vm(vmid) - vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() - - if interface in vminfo: - if not self.module.check_mode: - self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(vmid=vmid, delete=interface) - return True - - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - nic_args = dict( - bridge=dict(type='str'), - firewall=dict(type='bool', default=False), - interface=dict(type='str', required=True), - link_down=dict(type='bool', default=False), - mac=dict(type='str'), - model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', - 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', - 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'), - mtu=dict(type='int'), - name=dict(type='str'), - queues=dict(type='int'), - rate=dict(type='float'), - state=dict(default='present', choices=['present', 'absent']), - tag=dict(type='int'), - trunks=dict(type='list', elements='int'), - vmid=dict(type='int'), - ) - module_args.update(nic_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], - supports_check_mode=True, - ) - - proxmox = ProxmoxNicAnsible(module) - - interface = module.params['interface'] - model = module.params['model'] - name = module.params['name'] - state = module.params['state'] - vmid = module.params['vmid'] - - # If vmid is not defined then retrieve its value from the vm name, - if not vmid: - vmid = proxmox.get_vmid(name) - - # Ensure VM id exists - proxmox.get_vm(vmid) - - if state == 'present': - try: - if proxmox.update_nic(vmid, interface, model, - bridge=module.params['bridge'], - firewall=module.params['firewall'], - link_down=module.params['link_down'], - mac=module.params['mac'], - mtu=module.params['mtu'], - queues=module.params['queues'], - rate=module.params['rate'], - tag=module.params['tag'], - trunks=module.params['trunks']): - module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid)) - else: - module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) - - elif state == 'absent': - try: - if proxmox.delete_nic(vmid, interface): - module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid)) - else: - module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_snap.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_snap.py deleted file mode 100644 index cf570bd1..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_snap.py +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2020, Jeffrey van Pelt (@Thulium-Drake) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from 
__future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: proxmox_snap
-short_description: Snapshot management of instances in Proxmox VE cluster
-version_added: 2.0.0
-description:
-  - Allows you to create/delete snapshots from instances in Proxmox VE cluster.
-  - Supports both KVM and LXC; OpenVZ has not been tested, as it is no longer supported on Proxmox VE.
-options:
-  hostname:
-    description:
-      - The instance name.
-    type: str
-  vmid:
-    description:
-      - The instance ID.
-      - If not set, will be fetched from ProxmoxAPI based on the hostname.
-    type: str
-  state:
-    description:
-      - Indicate desired state of the instance snapshot.
-    choices: ['present', 'absent']
-    default: present
-    type: str
-  force:
-    description:
-      - For removal from config file, even if removing disk snapshot fails.
-    default: no
-    type: bool
-  vmstate:
-    description:
-      - Snapshot includes RAM.
-    default: no
-    type: bool
-  description:
-    description:
-      - Specify the description for the snapshot. Only used on the configuration web interface.
-      - This is saved as a comment inside the configuration file.
-    type: str
-  timeout:
-    description:
-      - Timeout for operations.
-    default: 30
-    type: int
-  snapname:
-    description:
-      - Name of the snapshot that has to be created.
-    default: 'ansible_snap'
-    type: str
-
-notes:
-  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
-  - Supports C(check_mode).
-requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
-author: Jeffrey van Pelt (@Thulium-Drake)
-extends_documentation_fragment:
-  - community.general.proxmox.documentation
-'''
-
-EXAMPLES = r'''
-- name: Create new container snapshot
-  community.general.proxmox_snap:
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    vmid: 100
-    state: present
-    snapname: pre-updates
-
-- name: Remove container snapshot
-  community.general.proxmox_snap:
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    vmid: 100
-    state: absent
-    snapname: pre-updates
-'''
-
-RETURN = r'''#'''
-
-import time
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
-
-
-class ProxmoxSnapAnsible(ProxmoxAnsible):
-    def snapshot(self, vm, vmid):
-        return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot
-
-    def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate):
-        if self.module.check_mode:
-            return True
-
-        if vm['type'] == 'lxc':
-            taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description)
-        else:
-            taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate))
-        while timeout:
-            if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
-                    self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
-                return True
-            timeout -= 1
-            if timeout == 0:
-                self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot.
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def snapshot_remove(self, vm, vmid, timeout, snapname, force): - if self.module.check_mode: - return True - - taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force)) - while timeout: - if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and - self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - snap_args = dict( - vmid=dict(required=False), - hostname=dict(), - timeout=dict(type='int', default=30), - state=dict(default='present', choices=['present', 'absent']), - description=dict(type='str'), - snapname=dict(type='str', default='ansible_snap'), - force=dict(type='bool', default='no'), - vmstate=dict(type='bool', default='no'), - ) - module_args.update(snap_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - - proxmox = ProxmoxSnapAnsible(module) - - state = module.params['state'] - vmid = module.params['vmid'] - hostname = module.params['hostname'] - description = module.params['description'] - snapname = module.params['snapname'] - timeout = module.params['timeout'] - force = module.params['force'] - vmstate = module.params['vmstate'] - - # If hostname is set get the VM id from ProxmoxAPI - if not vmid and hostname: - vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True) - elif not vmid: - module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) - - vm = proxmox.get_vm(vmid) - - if state == 'present': - try: - for i in proxmox.snapshot(vm, vmid).get(): - if i['name'] == snapname: - module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname) - - if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate): - if module.check_mode: - module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname) - else: - module.exit_json(changed=True, msg="Snapshot %s created" % snapname) - - except Exception as e: - module.fail_json(msg="Creating snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) - - elif state == 'absent': - try: - snap_exist = False - - for i in proxmox.snapshot(vm, vmid).get(): - if i['name'] == snapname: - snap_exist = True - continue - - if not snap_exist: - module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname) - else: - if proxmox.snapshot_remove(vm, vmid, timeout, snapname, force): - if module.check_mode: - module.exit_json(changed=False, msg="Snapshot %s would be removed" % snapname) - else: - module.exit_json(changed=True, msg="Snapshot %s removed" % snapname) - - except Exception as e: - module.fail_json(msg="Removing snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_storage_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_storage_info.py deleted file mode 100644 index 265b6fba..00000000 --- 
a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_storage_info.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Tristan Le Guern (@tleguern)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_storage_info
-short_description: Retrieve information about one or more Proxmox VE storages
-version_added: 2.2.0
-description:
-  - Retrieve information about one or more Proxmox VE storages.
-options:
-  storage:
-    description:
-      - Only return information on a specific storage.
-    aliases: ['name']
-    type: str
-  type:
-    description:
-      - Filter on a specific storage type.
-    type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment: community.general.proxmox.documentation
-notes:
-  - Storage-specific options can be returned by this module; please look at the documentation at U(https://pve.proxmox.com/wiki/Storage).
-'''
-
-
-EXAMPLES = '''
-- name: List existing storages
-  community.general.proxmox_storage_info:
-    api_host: helldorado
-    api_user: root@pam
-    api_password: "{{ password | default(omit) }}"
-    api_token_id: "{{ token_id | default(omit) }}"
-    api_token_secret: "{{ token_secret | default(omit) }}"
-  register: proxmox_storages
-
-- name: List NFS storages only
-  community.general.proxmox_storage_info:
-    api_host: helldorado
-    api_user: root@pam
-    api_password: "{{ password | default(omit) }}"
-    api_token_id: "{{ token_id | default(omit) }}"
-    api_token_secret: "{{ token_secret | default(omit) }}"
-    type: nfs
-  register: proxmox_storages_nfs
-
-- name: Retrieve information about the lvm2 storage
-  community.general.proxmox_storage_info:
-    api_host: helldorado
-    api_user: root@pam
-    api_password: "{{ password | default(omit) }}"
-    api_token_id: "{{ token_id | default(omit) }}"
-    api_token_secret: "{{ token_secret | default(omit) }}"
-    storage: lvm2
-  register: proxmox_storage_lvm
-'''
-
-
-RETURN = '''
-proxmox_storages:
-  description: List of storage pools.
- returned: on success - type: list - elements: dict - contains: - content: - description: Proxmox content types available in this storage - returned: on success - type: list - elements: str - digest: - description: Storage's digest - returned: on success - type: str - nodes: - description: List of nodes associated to this storage - returned: on success, if storage is not local - type: list - elements: str - path: - description: Physical path to this storage - returned: on success - type: str - prune-backups: - description: Backup retention options - returned: on success - type: list - elements: dict - shared: - description: Is this storage shared - returned: on success - type: bool - storage: - description: Storage name - returned: on success - type: str - type: - description: Storage type - returned: on success - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool) - - -class ProxmoxStorageInfoAnsible(ProxmoxAnsible): - def get_storage(self, storage): - try: - storage = self.proxmox_api.storage.get(storage) - except Exception: - self.module.fail_json(msg="Storage '%s' does not exist" % storage) - return ProxmoxStorage(storage) - - def get_storages(self, type=None): - storages = self.proxmox_api.storage.get(type=type) - storages = [ProxmoxStorage(storage) for storage in storages] - return storages - - -class ProxmoxStorage: - def __init__(self, storage): - self.storage = storage - # Convert proxmox representation of lists, dicts and boolean for easier - # manipulation within ansible. - if 'shared' in self.storage: - self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared']) - if 'content' in self.storage: - self.storage['content'] = self.storage['content'].split(',') - if 'nodes' in self.storage: - self.storage['nodes'] = self.storage['nodes'].split(',') - if 'prune-backups' in storage: - options = storage['prune-backups'].split(',') - self.storage['prune-backups'] = dict() - for option in options: - k, v = option.split('=') - self.storage['prune-backups'][k] = v - - -def proxmox_storage_info_argument_spec(): - return dict( - storage=dict(type='str', aliases=['name']), - type=dict(type='str'), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - storage_info_args = proxmox_storage_info_argument_spec() - module_args.update(storage_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - mutually_exclusive=[('storage', 'type')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxStorageInfoAnsible(module) - storage = module.params['storage'] - storagetype = module.params['type'] - - if storage: - storages = [proxmox.get_storage(storage)] - else: - storages = proxmox.get_storages(type=storagetype) - result['proxmox_storages'] = [storage.storage for storage in storages] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_tasks_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_tasks_info.py deleted file mode 100644 index ff3bf686..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_tasks_info.py +++ /dev/null @@ -1,183 +0,0 @@ 
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Andreas Botzner (@paginabianca)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: proxmox_tasks_info
-short_description: Retrieve information about one or more Proxmox VE tasks
-version_added: 3.8.0
-description:
-  - Retrieve information about one or more Proxmox VE tasks.
-author: 'Andreas Botzner (@paginabianca)'
-options:
-  node:
-    description:
-      - Node from which to retrieve tasks.
-    required: true
-    type: str
-  task:
-    description:
-      - Return a specific task.
-    aliases: ['upid', 'name']
-    type: str
-extends_documentation_fragment:
-  - community.general.proxmox.documentation
-'''
-
-
-EXAMPLES = '''
-- name: List tasks on node01
-  community.general.proxmox_tasks_info:
-    api_host: proxmoxhost
-    api_user: root@pam
-    api_password: '{{ password | default(omit) }}'
-    api_token_id: '{{ token_id | default(omit) }}'
-    api_token_secret: '{{ token_secret | default(omit) }}'
-    node: node01
-  register: result
-
-- name: Retrieve information about specific tasks on node01
-  community.general.proxmox_tasks_info:
-    api_host: proxmoxhost
-    api_user: root@pam
-    api_password: '{{ password | default(omit) }}'
-    api_token_id: '{{ token_id | default(omit) }}'
-    api_token_secret: '{{ token_secret | default(omit) }}'
-    task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
-    node: node01
-  register: proxmox_tasks
-'''
-
-
-RETURN = '''
-proxmox_tasks:
-  description: List of tasks.
-  returned: on success
-  type: list
-  elements: dict
-  contains:
-    id:
-      description: ID of the task.
-      returned: on success
-      type: str
-    node:
-      description: Node name.
-      returned: on success
-      type: str
-    pid:
-      description: PID of the task.
-      returned: on success
-      type: int
-    pstart:
-      description: pstart of the task.
-      returned: on success
-      type: int
-    starttime:
-      description: Starting time of the task.
-      returned: on success
-      type: int
-    type:
-      description: Type of the task.
-      returned: on success
-      type: str
-    upid:
-      description: UPID of the task.
-      returned: on success
-      type: str
-    user:
-      description: User that owns the task.
-      returned: on success
-      type: str
-    endtime:
-      description: Endtime of the task.
-      returned: on success, can be absent
-      type: int
-    status:
-      description: Status of the task.
-      returned: on success, can be absent
-      type: str
-    failed:
-      description: If the task failed.
-      returned: when status is defined
-      type: bool
-msg:
-  description: Short message.
- returned: on failure - type: str - sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode' -''' - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxTaskInfoAnsible(ProxmoxAnsible): - def get_task(self, upid, node): - tasks = self.get_tasks(node) - for task in tasks: - if task.info['upid'] == upid: - return [task] - - def get_tasks(self, node): - tasks = self.proxmox_api.nodes(node).tasks.get() - return [ProxmoxTask(task) for task in tasks] - - -class ProxmoxTask: - def __init__(self, task): - self.info = dict() - for k, v in task.items(): - if k == 'status' and isinstance(v, str): - self.info[k] = v - if v != 'OK': - self.info['failed'] = True - else: - self.info[k] = v - - -def proxmox_task_info_argument_spec(): - return dict( - task=dict(type='str', aliases=['upid', 'name'], required=False), - node=dict(type='str', required=True), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - task_info_args = proxmox_task_info_argument_spec() - module_args.update(task_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[('api_token_id', 'api_token_secret'), - ('api_user', 'api_password')], - required_one_of=[('api_password', 'api_token_id')], - supports_check_mode=True) - result = dict(changed=False) - - proxmox = ProxmoxTaskInfoAnsible(module) - upid = module.params['task'] - node = module.params['node'] - if upid: - tasks = proxmox.get_task(upid=upid, node=node) - else: - tasks = proxmox.get_tasks(node=node) - if tasks is not None: - result['proxmox_tasks'] = [task.info for task in tasks] - module.exit_json(**result) - else: - result['msg'] = 'Task: {0} does not exist on node: {1}.'.format( - upid, node) - module.fail_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py deleted file mode 100644 index 32ff8e7e..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_template -short_description: management of OS templates in Proxmox VE cluster -description: - - allows you to upload/delete templates in Proxmox VE cluster -options: - node: - description: - - Proxmox VE node on which to operate. - type: str - src: - description: - - path to uploaded file - - required only for C(state=present) - type: path - template: - description: - - the template name - - Required for state C(absent) to delete a template. - - Required for state C(present) to download an appliance container template (pveam). 
-    type: str
-  content_type:
-    description:
-      - content type
-      - required only for C(state=present)
-    type: str
-    default: 'vztmpl'
-    choices: ['vztmpl', 'iso']
-  storage:
-    description:
-      - target storage
-    type: str
-    default: 'local'
-  timeout:
-    description:
-      - timeout for operations
-    type: int
-    default: 30
-  force:
-    description:
-      - can be used only with C(state=present); an existing template will be overwritten
-    type: bool
-    default: 'no'
-  state:
-    description:
-      - Indicate desired state of the template
-    type: str
-    choices: ['present', 'absent']
-    default: present
-notes:
-  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
-author: Sergei Antipov (@UnderGreen)
-extends_documentation_fragment: community.general.proxmox.documentation
-'''
-
-EXAMPLES = '''
-- name: Upload new openvz template with minimal options
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    src: ~/ubuntu-14.04-x86_64.tar.gz
-
-- name: >
-    Upload new openvz template with minimal options using the environment
-    variable PROXMOX_PASSWORD (you should export it beforehand)
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_host: node1
-    src: ~/ubuntu-14.04-x86_64.tar.gz
-
-- name: Upload new openvz template with all options and force overwrite
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    storage: local
-    content_type: vztmpl
-    src: ~/ubuntu-14.04-x86_64.tar.gz
-    force: yes
-
-- name: Delete template with minimal options
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    template: ubuntu-14.04-x86_64.tar.gz
-    state: absent
-
-- name: Download proxmox appliance container template
-  community.general.proxmox_template:
-    node: uk-mc02
-    api_user: root@pam
-    api_password: 1q2w3e
-    api_host: node1
-    storage: local
-    content_type: vztmpl
-    template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
-'''
-
-import os
-import time
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxTemplateAnsible(ProxmoxAnsible):
-    def get_template(self, node, storage, content_type, template):
-        return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get()
-                if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
-
-    def task_status(self, node, taskid, timeout):
-        """
-        Check the task status and wait until the task is completed or the timeout is reached.
-        """
-        while timeout:
-            task_status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
-            if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
-                return True
-            timeout = timeout - 1
-            if timeout == 0:
-                self.module.fail_json(msg='Reached timeout while waiting for uploading/downloading template. Last line in task before timeout: %s' %
-                                      self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1])
-
-            time.sleep(1)
-        return False
-
-    def upload_template(self, node, storage, content_type, realpath, timeout):
-        taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
-        return self.task_status(node, taskid, timeout)
-
-    def download_template(self, node, storage, template, timeout):
-        taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template)
-        return self.task_status(node, taskid, timeout)
-
-    def delete_template(self, node, storage, content_type, template, timeout):
-        volid = '%s:%s/%s' % (storage, content_type, template)
-        self.proxmox_api.nodes(node).storage(storage).content.delete(volid)
-        while timeout:
-            if not self.get_template(node, storage, content_type, template):
-                return True
-            timeout = timeout - 1
-            if timeout == 0:
-                self.module.fail_json(msg='Reached timeout while waiting for deleting template.')
-
-            time.sleep(1)
-        return False
-
-
-def main():
-    module_args = proxmox_auth_argument_spec()
-    template_args = dict(
-        node=dict(),
-        src=dict(type='path'),
-        template=dict(),
-        content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
-        storage=dict(default='local'),
-        timeout=dict(type='int', default=30),
-        force=dict(type='bool', default=False),
-        state=dict(default='present', choices=['present', 'absent']),
-    )
-    module_args.update(template_args)
-
-    module = AnsibleModule(
-        argument_spec=module_args,
-        required_together=[('api_token_id', 'api_token_secret')],
-        required_one_of=[('api_password', 'api_token_id')],
-        required_if=[('state', 'absent', ['template'])]
-    )
-
-    proxmox = ProxmoxTemplateAnsible(module)
-
-    state = module.params['state']
-    node = module.params['node']
-    storage = module.params['storage']
-    timeout = module.params['timeout']
-
-    if state == 'present':
-        try:
-            content_type = module.params['content_type']
-            src = module.params['src']
-
-            # download appliance template
-            if content_type == 'vztmpl' and not src:
-                template = module.params['template']
-
-                if not template:
-                    module.fail_json(msg='template param for downloading appliance template is mandatory')
-
-                if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
-                    module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
-
-                if proxmox.download_template(node, storage, template, timeout):
-                    module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
-
-            template = os.path.basename(src)
-            if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
-                module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
-            elif not src:
-                module.fail_json(msg='src param for uploading template file is mandatory')
-            elif not (os.path.exists(src) and os.path.isfile(src)):
-                module.fail_json(msg='template file at path %s does not exist' % src)
-
-            if proxmox.upload_template(node, storage, content_type, src, timeout):
-                module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
-        except Exception as e:
-            module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
-
-    elif state == 'absent':
-        try:
-            content_type = module.params['content_type']
-            template = module.params['template']
-
-            if
not proxmox.get_template(node, storage, content_type, template): - module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) - - if proxmox.delete_template(node, storage, content_type, template, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) - except Exception as e: - module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py deleted file mode 100644 index d0ee365b..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_user_info -short_description: Retrieve information about one or more Proxmox VE users -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE users -options: - domain: - description: - - Restrict results to a specific authentication realm. - aliases: ['realm'] - type: str - user: - description: - - Restrict results to a specific user. - aliases: ['name'] - type: str - userid: - description: - - Restrict results to a specific user ID, which is a concatenation of a user and domain parts. - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - -EXAMPLES = ''' -- name: List existing users - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_users - -- name: List existing users in the pve authentication realm - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - domain: pve - register: proxmox_users_pve - -- name: Retrieve information about admin@pve - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - userid: admin@pve - register: proxmox_user_admin - -- name: Alternative way to retrieve information about admin@pve - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - user: admin - domain: pve - register: proxmox_user_admin -''' - - -RETURN = ''' -proxmox_users: - description: List of users. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the user. - returned: on success - type: str - domain: - description: User's authentication realm, also the right part of the user ID. 
- returned: on success - type: str - email: - description: User's email address. - returned: on success - type: str - enabled: - description: User's account state. - returned: on success - type: bool - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - firstname: - description: User's first name. - returned: on success - type: str - groups: - description: List of groups which the user is a member of. - returned: on success - type: list - elements: str - keys: - description: User's two factor authentication keys. - returned: on success - type: str - lastname: - description: User's last name. - returned: on success - type: str - tokens: - description: List of API tokens associated to the user. - returned: on success - type: list - elements: dict - contains: - comment: - description: Short description of the token. - returned: on success - type: str - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - privsep: - description: Describe if the API token is further restricted with ACLs or is fully privileged. - returned: on success - type: bool - tokenid: - description: Token name. - returned: on success - type: str - user: - description: User's login name, also the left part of the user ID. - returned: on success - type: str - userid: - description: Proxmox user ID, represented as user@realm. - returned: on success - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool) - - -class ProxmoxUserInfoAnsible(ProxmoxAnsible): - def get_user(self, userid): - try: - user = self.proxmox_api.access.users.get(userid) - except Exception: - self.module.fail_json(msg="User '%s' does not exist" % userid) - user['userid'] = userid - return ProxmoxUser(user) - - def get_users(self, domain=None): - users = self.proxmox_api.access.users.get(full=1) - users = [ProxmoxUser(user) for user in users] - if domain: - return [user for user in users if user.user['domain'] == domain] - return users - - -class ProxmoxUser: - def __init__(self, user): - self.user = dict() - # Data representation is not the same depending on API calls - for k, v in user.items(): - if k == 'enable': - self.user['enabled'] = proxmox_to_ansible_bool(user['enable']) - elif k == 'userid': - self.user['user'] = user['userid'].split('@')[0] - self.user['domain'] = user['userid'].split('@')[1] - self.user[k] = v - elif k in ['groups', 'tokens'] and (v == '' or v is None): - self.user[k] = [] - elif k == 'groups' and type(v) == str: - self.user['groups'] = v.split(',') - elif k == 'tokens' and type(v) == list: - for token in v: - if 'privsep' in token: - token['privsep'] = proxmox_to_ansible_bool(token['privsep']) - self.user['tokens'] = v - elif k == 'tokens' and type(v) == dict: - self.user['tokens'] = list() - for tokenid, tokenvalues in v.items(): - t = tokenvalues - t['tokenid'] = tokenid - if 'privsep' in tokenvalues: - t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep']) - self.user['tokens'].append(t) - else: - self.user[k] = v - - -def proxmox_user_info_argument_spec(): - return dict( - domain=dict(type='str', aliases=['realm']), - user=dict(type='str', aliases=['name']), - userid=dict(type='str'), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - user_info_args = 
proxmox_user_info_argument_spec() - module_args.update(user_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - mutually_exclusive=[('user', 'userid'), ('domain', 'userid')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxUserInfoAnsible(module) - domain = module.params['domain'] - user = module.params['user'] - if user and domain: - userid = user + '@' + domain - else: - userid = module.params['userid'] - - if userid: - users = [proxmox.get_user(userid=userid)] - else: - users = proxmox.get_users(domain=domain) - result['proxmox_users'] = [user.user for user in users] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py b/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py deleted file mode 100644 index 77b40248..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py +++ /dev/null @@ -1,1498 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Timothy Vandenbrande -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: rhevm -short_description: RHEV/oVirt automation -description: - - This module only supports oVirt/RHEV version 3. - - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4. - - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. -requirements: - - ovirtsdk -author: -- Timothy Vandenbrande (@TimothyVandenbrande) -options: - user: - description: - - The user to authenticate with. - type: str - default: admin@internal - password: - description: - - The password for user authentication. - type: str - required: true - server: - description: - - The name/IP of your RHEV-m/oVirt instance. - type: str - default: 127.0.0.1 - port: - description: - - The port on which the API is reachable. - type: int - default: 443 - insecure_api: - description: - - A boolean switch to make a secure or insecure connection to the server. - type: bool - default: no - name: - description: - - The name of the VM. - type: str - cluster: - description: - - The RHEV/oVirt cluster in which you want you VM to start. - type: str - datacenter: - description: - - The RHEV/oVirt datacenter in which you want you VM to start. - type: str - default: Default - state: - description: - - This serves to create/remove/update or powermanage your VM. - type: str - choices: [ absent, cd, down, info, ping, present, restarted, up ] - default: present - image: - description: - - The template to use for the VM. - type: str - type: - description: - - To define if the VM is a server or desktop. - type: str - choices: [ desktop, host, server ] - default: server - vmhost: - description: - - The host you wish your VM to run on. - type: str - vmcpu: - description: - - The number of CPUs you want in your VM. - type: int - default: 2 - cpu_share: - description: - - This parameter is used to configure the CPU share. - type: int - default: 0 - vmmem: - description: - - The amount of memory you want your VM to use (in GB). - type: int - default: 1 - osver: - description: - - The operating system option in RHEV/oVirt. 
- type: str - default: rhel_6x64 - mempol: - description: - - The minimum amount of memory you wish to reserve for this system. - type: int - default: 1 - vm_ha: - description: - - To make your VM High Available. - type: bool - default: yes - disks: - description: - - This option uses complex arguments and is a list of disks with the options name, size and domain. - type: list - elements: str - ifaces: - description: - - This option uses complex arguments and is a list of interfaces with the options name and vlan. - type: list - elements: str - aliases: [ interfaces, nics ] - boot_order: - description: - - This option uses complex arguments and is a list of items that specify the bootorder. - type: list - elements: str - default: [ hd, network ] - del_prot: - description: - - This option sets the delete protection checkbox. - type: bool - default: yes - cd_drive: - description: - - The CD you wish to have mounted on the VM when I(state = 'CD'). - type: str - timeout: - description: - - The timeout you wish to define for power actions. - - When I(state = 'up'). - - When I(state = 'down'). - - When I(state = 'restarted'). - type: int -''' - -RETURN = r''' -vm: - description: Returns all of the VMs variables and execution. - returned: always - type: dict - sample: '{ - "boot_order": [ - "hd", - "network" - ], - "changed": true, - "changes": [ - "Delete Protection" - ], - "cluster": "C1", - "cpu_share": "0", - "created": false, - "datacenter": "Default", - "del_prot": true, - "disks": [ - { - "domain": "ssd-san", - "name": "OS", - "size": 40 - } - ], - "eth0": "00:00:5E:00:53:00", - "eth1": "00:00:5E:00:53:01", - "eth2": "00:00:5E:00:53:02", - "exists": true, - "failed": false, - "ifaces": [ - { - "name": "eth0", - "vlan": "Management" - }, - { - "name": "eth1", - "vlan": "Internal" - }, - { - "name": "eth2", - "vlan": "External" - } - ], - "image": false, - "mempol": "0", - "msg": [ - "VM exists", - "cpu_share was already set to 0", - "VM high availability was already set to True", - "The boot order has already been set", - "VM delete protection has been set to True", - "Disk web2_Disk0_OS already exists", - "The VM starting host was already set to host416" - ], - "name": "web2", - "type": "server", - "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", - "vm_ha": true, - "vmcpu": "4", - "vmhost": "host416", - "vmmem": "16" - }' -''' - -EXAMPLES = r''' -- name: Basic get info from VM - community.general.rhevm: - server: rhevm01 - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - name: demo - state: info - -- name: Basic create example from image - community.general.rhevm: - server: rhevm01 - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - name: demo - cluster: centos - image: centos7_x64 - state: present - -- name: Power management - community.general.rhevm: - server: rhevm01 - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - cluster: RH - name: uptime_server - image: centos7_x64 - state: down - -- name: Multi disk, multi nic create example - community.general.rhevm: - server: rhevm01 - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - cluster: RH - name: server007 - type: server - vmcpu: 4 - vmmem: 2 - ifaces: - - name: eth0 - vlan: vlan2202 - - name: eth1 - vlan: vlan36 - - name: eth2 - vlan: vlan38 - - name: eth3 - vlan: vlan2202 - disks: - - name: root - size: 10 - domain: ssd-san - - name: swap - size: 10 - domain: 15kiscsi-san - - name: opt - size: 10 - domain: 15kiscsi-san - - name: var - size: 10 - domain: 
10kiscsi-san - - name: home - size: 10 - domain: sata-san - boot_order: - - network - - hd - state: present - -- name: Add a CD to the disk cd_drive - community.general.rhevm: - user: '{{ rhev.admin.name }}' - password: '{{ rhev.admin.pass }}' - name: server007 - cd_drive: rhev-tools-setup.iso - state: cd - -- name: New host deployment + host network configuration - community.general.rhevm: - password: '{{ rhevm.admin.pass }}' - name: ovirt_node007 - type: host - cluster: rhevm01 - ifaces: - - name: em1 - - name: em2 - - name: p3p1 - ip: 172.31.224.200 - netmask: 255.255.254.0 - - name: p3p2 - ip: 172.31.225.200 - netmask: 255.255.254.0 - - name: bond0 - bond: - - em1 - - em2 - network: rhevm - ip: 172.31.222.200 - netmask: 255.255.255.0 - management: yes - - name: bond0.36 - network: vlan36 - ip: 10.2.36.200 - netmask: 255.255.254.0 - gateway: 10.2.36.254 - - name: bond0.2202 - network: vlan2202 - - name: bond0.38 - network: vlan38 - state: present -''' - -import time - -try: - from ovirtsdk.api import API - from ovirtsdk.xml import params - HAS_SDK = True -except ImportError: - HAS_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -RHEV_FAILED = 1 -RHEV_SUCCESS = 0 -RHEV_UNAVAILABLE = 2 - -RHEV_TYPE_OPTS = ['desktop', 'host', 'server'] -STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restart', 'up'] - -msg = [] -changed = False -failed = False - - -class RHEVConn(object): - 'Connection to RHEV-M' - - def __init__(self, module): - self.module = module - - user = module.params.get('user') - password = module.params.get('password') - server = module.params.get('server') - port = module.params.get('port') - insecure_api = module.params.get('insecure_api') - - url = "https://%s:%s" % (server, port) - - try: - api = API(url=url, username=user, password=password, insecure=str(insecure_api)) - api.test() - self.conn = api - except Exception: - raise Exception("Failed to connect to RHEV-M.") - - def __del__(self): - self.conn.disconnect() - - def createVMimage(self, name, cluster, template): - try: - vmparams = params.VM( - name=name, - cluster=self.conn.clusters.get(name=cluster), - template=self.conn.templates.get(name=template), - disks=params.Disks(clone=True) - ) - self.conn.vms.add(vmparams) - setMsg("VM is created") - setChanged() - return True - except Exception as e: - setMsg("Failed to create VM") - setMsg(str(e)) - setFailed() - return False - - def createVM(self, name, cluster, os, actiontype): - try: - vmparams = params.VM( - name=name, - cluster=self.conn.clusters.get(name=cluster), - os=params.OperatingSystem(type_=os), - template=self.conn.templates.get(name="Blank"), - type_=actiontype - ) - self.conn.vms.add(vmparams) - setMsg("VM is created") - setChanged() - return True - except Exception as e: - setMsg("Failed to create VM") - setMsg(str(e)) - setFailed() - return False - - def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot): - VM = self.get_VM(vmname) - - newdisk = params.Disk( - name=diskname, - size=1024 * 1024 * 1024 * int(disksize), - wipe_after_delete=True, - sparse=diskallocationtype, - interface=diskinterface, - format=diskformat, - bootable=diskboot, - storage_domains=params.StorageDomains( - storage_domain=[self.get_domain(diskdomain)] - ) - ) - - try: - VM.disks.add(newdisk) - VM.update() - setMsg("Successfully added disk " + diskname) - setChanged() - except Exception as e: - setFailed() - setMsg("Error attaching " + diskname + "disk, please recheck and remove 
any leftover configuration.") - setMsg(str(e)) - return False - - try: - currentdisk = VM.disks.get(name=diskname) - attempt = 1 - while currentdisk.status.state != 'ok': - currentdisk = VM.disks.get(name=diskname) - if attempt == 100: - setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state))) - raise Exception() - else: - attempt += 1 - time.sleep(2) - setMsg("The disk " + diskname + " is ready.") - except Exception as e: - setFailed() - setMsg("Error getting the state of " + diskname + ".") - setMsg(str(e)) - return False - return True - - def createNIC(self, vmname, nicname, vlan, interface): - VM = self.get_VM(vmname) - CLUSTER = self.get_cluster_byid(VM.cluster.id) - DC = self.get_DC_byid(CLUSTER.data_center.id) - newnic = params.NIC( - name=nicname, - network=DC.networks.get(name=vlan), - interface=interface - ) - - try: - VM.nics.add(newnic) - VM.update() - setMsg("Successfully added iface " + nicname) - setChanged() - except Exception as e: - setFailed() - setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.") - setMsg(str(e)) - return False - - try: - currentnic = VM.nics.get(name=nicname) - attempt = 1 - while currentnic.active is not True: - currentnic = VM.nics.get(name=nicname) - if attempt == 100: - setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active))) - raise Exception() - else: - attempt += 1 - time.sleep(2) - setMsg("The iface " + nicname + " is ready.") - except Exception as e: - setFailed() - setMsg("Error getting the state of " + nicname + ".") - setMsg(str(e)) - return False - return True - - def get_DC(self, dc_name): - return self.conn.datacenters.get(name=dc_name) - - def get_DC_byid(self, dc_id): - return self.conn.datacenters.get(id=dc_id) - - def get_VM(self, vm_name): - return self.conn.vms.get(name=vm_name) - - def get_cluster_byid(self, cluster_id): - return self.conn.clusters.get(id=cluster_id) - - def get_cluster(self, cluster_name): - return self.conn.clusters.get(name=cluster_name) - - def get_domain_byid(self, dom_id): - return self.conn.storagedomains.get(id=dom_id) - - def get_domain(self, domain_name): - return self.conn.storagedomains.get(name=domain_name) - - def get_disk(self, disk): - return self.conn.disks.get(disk) - - def get_network(self, dc_name, network_name): - return self.get_DC(dc_name).networks.get(network_name) - - def get_network_byid(self, network_id): - return self.conn.networks.get(id=network_id) - - def get_NIC(self, vm_name, nic_name): - return self.get_VM(vm_name).nics.get(nic_name) - - def get_Host(self, host_name): - return self.conn.hosts.get(name=host_name) - - def get_Host_byid(self, host_id): - return self.conn.hosts.get(id=host_id) - - def set_Memory(self, name, memory): - VM = self.get_VM(name) - VM.memory = int(int(memory) * 1024 * 1024 * 1024) - try: - VM.update() - setMsg("The Memory has been updated.") - setChanged() - return True - except Exception as e: - setMsg("Failed to update memory.") - setMsg(str(e)) - setFailed() - return False - - def set_Memory_Policy(self, name, memory_policy): - VM = self.get_VM(name) - VM.memory_policy.guaranteed = int(memory_policy) * 1024 * 1024 * 1024 - try: - VM.update() - setMsg("The memory policy has been updated.") - setChanged() - return True - except Exception as e: - setMsg("Failed to update memory policy.") - setMsg(str(e)) - setFailed() - return False - - def set_CPU(self, name, cpu): - VM = self.get_VM(name) - VM.cpu.topology.cores = int(cpu) - try: - VM.update() - setMsg("The number of 
CPUs has been updated.") - setChanged() - return True - except Exception as e: - setMsg("Failed to update the number of CPUs.") - setMsg(str(e)) - setFailed() - return False - - def set_CPU_share(self, name, cpu_share): - VM = self.get_VM(name) - VM.cpu_shares = int(cpu_share) - try: - VM.update() - setMsg("The CPU share has been updated.") - setChanged() - return True - except Exception as e: - setMsg("Failed to update the CPU share.") - setMsg(str(e)) - setFailed() - return False - - def set_Disk(self, diskname, disksize, diskinterface, diskboot): - DISK = self.get_disk(diskname) - setMsg("Checking disk " + diskname) - if DISK.get_bootable() != diskboot: - try: - DISK.set_bootable(diskboot) - setMsg("Updated the boot option on the disk.") - setChanged() - except Exception as e: - setMsg("Failed to set the boot option on the disk.") - setMsg(str(e)) - setFailed() - return False - else: - setMsg("The boot option of the disk is correct") - if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)): - try: - DISK.size = (1024 * 1024 * 1024 * int(disksize)) - setMsg("Updated the size of the disk.") - setChanged() - except Exception as e: - setMsg("Failed to update the size of the disk.") - setMsg(str(e)) - setFailed() - return False - elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)): - setMsg("Shrinking disks is not supported") - setFailed() - return False - else: - setMsg("The size of the disk is correct") - if str(DISK.interface) != str(diskinterface): - try: - DISK.interface = diskinterface - setMsg("Updated the interface of the disk.") - setChanged() - except Exception as e: - setMsg("Failed to update the interface of the disk.") - setMsg(str(e)) - setFailed() - return False - else: - setMsg("The interface of the disk is correct") - return True - - def set_NIC(self, vmname, nicname, newname, vlan, interface): - NIC = self.get_NIC(vmname, nicname) - VM = self.get_VM(vmname) - CLUSTER = self.get_cluster_byid(VM.cluster.id) - DC = self.get_DC_byid(CLUSTER.data_center.id) - NETWORK = self.get_network(str(DC.name), vlan) - checkFail() - if NIC.name != newname: - NIC.name = newname - setMsg('Updating iface name to ' + newname) - setChanged() - if str(NIC.network.id) != str(NETWORK.id): - NIC.set_network(NETWORK) - setMsg('Updating iface network to ' + vlan) - setChanged() - if NIC.interface != interface: - NIC.interface = interface - setMsg('Updating iface interface to ' + interface) - setChanged() - try: - NIC.update() - setMsg('iface has successfully been updated.') - except Exception as e: - setMsg("Failed to update the iface.") - setMsg(str(e)) - setFailed() - return False - return True - - def set_DeleteProtection(self, vmname, del_prot): - VM = self.get_VM(vmname) - VM.delete_protected = del_prot - try: - VM.update() - setChanged() - except Exception as e: - setMsg("Failed to update delete protection.") - setMsg(str(e)) - setFailed() - return False - return True - - def set_BootOrder(self, vmname, boot_order): - VM = self.get_VM(vmname) - bootorder = [] - for device in boot_order: - bootorder.append(params.Boot(dev=device)) - VM.os.boot = bootorder - - try: - VM.update() - setChanged() - except Exception as e: - setMsg("Failed to update the boot order.") - setMsg(str(e)) - setFailed() - return False - return True - - def set_Host(self, host_name, cluster, ifaces): - HOST = self.get_Host(host_name) - CLUSTER = self.get_cluster(cluster) - - if HOST is None: - setMsg("Host does not exist.") - ifacelist = dict() - networklist = [] - manageip = '' - - try: - for iface in ifaces: - try: 
- setMsg('creating host interface ' + iface['name']) - if 'management' in iface: - manageip = iface['ip'] - if 'boot_protocol' not in iface: - if 'ip' in iface: - iface['boot_protocol'] = 'static' - else: - iface['boot_protocol'] = 'none' - if 'ip' not in iface: - iface['ip'] = '' - if 'netmask' not in iface: - iface['netmask'] = '' - if 'gateway' not in iface: - iface['gateway'] = '' - - if 'network' in iface: - if 'bond' in iface: - bond = [] - for slave in iface['bond']: - bond.append(ifacelist[slave]) - try: - tmpiface = params.Bonding( - slaves=params.Slaves(host_nic=bond), - options=params.Options( - option=[ - params.Option(name='miimon', value='100'), - params.Option(name='mode', value='4') - ] - ) - ) - except Exception as e: - setMsg('Failed to create the bond for ' + iface['name']) - setFailed() - setMsg(str(e)) - return False - try: - tmpnetwork = params.HostNIC( - network=params.Network(name=iface['network']), - name=iface['name'], - boot_protocol=iface['boot_protocol'], - ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] - ), - override_configuration=True, - bonding=tmpiface) - networklist.append(tmpnetwork) - setMsg('Applying network ' + iface['name']) - except Exception as e: - setMsg('Failed to set' + iface['name'] + ' as network interface') - setFailed() - setMsg(str(e)) - return False - else: - tmpnetwork = params.HostNIC( - network=params.Network(name=iface['network']), - name=iface['name'], - boot_protocol=iface['boot_protocol'], - ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] - )) - networklist.append(tmpnetwork) - setMsg('Applying network ' + iface['name']) - else: - tmpiface = params.HostNIC( - name=iface['name'], - network=params.Network(), - boot_protocol=iface['boot_protocol'], - ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] - )) - ifacelist[iface['name']] = tmpiface - except Exception as e: - setMsg('Failed to set ' + iface['name']) - setFailed() - setMsg(str(e)) - return False - except Exception as e: - setMsg('Failed to set networks') - setMsg(str(e)) - setFailed() - return False - - if manageip == '': - setMsg('No management network is defined') - setFailed() - return False - - try: - HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey')) - if self.conn.hosts.add(HOST): - setChanged() - HOST = self.get_Host(host_name) - state = HOST.status.state - while (state != 'non_operational' and state != 'up'): - HOST = self.get_Host(host_name) - state = HOST.status.state - time.sleep(1) - if state == 'non_responsive': - setMsg('Failed to add host to RHEVM') - setFailed() - return False - - setMsg('status host: up') - time.sleep(5) - - HOST = self.get_Host(host_name) - state = HOST.status.state - setMsg('State before setting to maintenance: ' + str(state)) - HOST.deactivate() - while state != 'maintenance': - HOST = self.get_Host(host_name) - state = HOST.status.state - time.sleep(1) - setMsg('status host: maintenance') - - try: - HOST.nics.setupnetworks(params.Action( - force=True, - check_connectivity=False, - host_nics=params.HostNics(host_nic=networklist) - )) - setMsg('nics are set') - except Exception as e: - setMsg('Failed to apply networkconfig') - setFailed() - setMsg(str(e)) - return False - - try: - HOST.commitnetconfig() - setMsg('Network config is saved') - except Exception as e: - setMsg('Failed to save networkconfig') - setFailed() - setMsg(str(e)) - return 
False - except Exception as e: - if 'The Host name is already in use' in str(e): - setMsg("Host already exists") - else: - setMsg("Failed to add host") - setFailed() - setMsg(str(e)) - return False - - HOST.activate() - while state != 'up': - HOST = self.get_Host(host_name) - state = HOST.status.state - time.sleep(1) - if state == 'non_responsive': - setMsg('Failed to apply networkconfig.') - setFailed() - return False - setMsg('status host: up') - else: - setMsg("Host exists.") - - return True - - def del_NIC(self, vmname, nicname): - return self.get_NIC(vmname, nicname).delete() - - def remove_VM(self, vmname): - VM = self.get_VM(vmname) - try: - VM.delete() - except Exception as e: - setMsg("Failed to remove VM.") - setMsg(str(e)) - setFailed() - return False - return True - - def start_VM(self, vmname, timeout): - VM = self.get_VM(vmname) - try: - VM.start() - except Exception as e: - setMsg("Failed to start VM.") - setMsg(str(e)) - setFailed() - return False - return self.wait_VM(vmname, "up", timeout) - - def wait_VM(self, vmname, state, timeout): - VM = self.get_VM(vmname) - while VM.status.state != state: - VM = self.get_VM(vmname) - time.sleep(10) - if timeout is not False: - timeout -= 10 - if timeout <= 0: - setMsg("Timeout expired") - setFailed() - return False - return True - - def stop_VM(self, vmname, timeout): - VM = self.get_VM(vmname) - try: - VM.stop() - except Exception as e: - setMsg("Failed to stop VM.") - setMsg(str(e)) - setFailed() - return False - return self.wait_VM(vmname, "down", timeout) - - def set_CD(self, vmname, cd_drive): - VM = self.get_VM(vmname) - try: - if str(VM.status.state) == 'down': - cdrom = params.CdRom(file=cd_drive) - VM.cdroms.add(cdrom) - setMsg("Attached the image.") - setChanged() - else: - cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000") - cdrom.set_file(cd_drive) - cdrom.update(current=True) - setMsg("Attached the image.") - setChanged() - except Exception as e: - setMsg("Failed to attach image.") - setMsg(str(e)) - setFailed() - return False - return True - - def set_VM_Host(self, vmname, vmhost): - VM = self.get_VM(vmname) - HOST = self.get_Host(vmhost) - try: - VM.placement_policy.host = HOST - VM.update() - setMsg("Set startup host to " + vmhost) - setChanged() - except Exception as e: - setMsg("Failed to set startup host.") - setMsg(str(e)) - setFailed() - return False - return True - - def migrate_VM(self, vmname, vmhost): - VM = self.get_VM(vmname) - - HOST = self.get_Host_byid(VM.host.id) - if str(HOST.name) != vmhost: - try: - VM.migrate( - action=params.Action( - host=params.Host( - name=vmhost, - ) - ), - ) - setChanged() - setMsg("VM migrated to " + vmhost) - except Exception as e: - setMsg("Failed to set startup host.") - setMsg(str(e)) - setFailed() - return False - return True - - def remove_CD(self, vmname): - VM = self.get_VM(vmname) - try: - VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete() - setMsg("Removed the image.") - setChanged() - except Exception as e: - setMsg("Failed to remove the image.") - setMsg(str(e)) - setFailed() - return False - return True - - -class RHEV(object): - def __init__(self, module): - self.module = module - - def __get_conn(self): - self.conn = RHEVConn(self.module) - return self.conn - - def test(self): - self.__get_conn() - return "OK" - - def getVM(self, name): - self.__get_conn() - VM = self.conn.get_VM(name) - if VM: - vminfo = dict() - vminfo['uuid'] = VM.id - vminfo['name'] = VM.name - vminfo['status'] = VM.status.state - vminfo['cpu_cores'] = 
VM.cpu.topology.cores - vminfo['cpu_sockets'] = VM.cpu.topology.sockets - vminfo['cpu_shares'] = VM.cpu_shares - vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024) - vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024) - vminfo['os'] = VM.get_os().type_ - vminfo['del_prot'] = VM.delete_protected - try: - vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name) - except Exception: - vminfo['host'] = None - vminfo['boot_order'] = [] - for boot_dev in VM.os.get_boot(): - vminfo['boot_order'].append(str(boot_dev.dev)) - vminfo['disks'] = [] - for DISK in VM.disks.list(): - disk = dict() - disk['name'] = DISK.name - disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024) - disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name) - disk['interface'] = DISK.interface - vminfo['disks'].append(disk) - vminfo['ifaces'] = [] - for NIC in VM.nics.list(): - iface = dict() - iface['name'] = str(NIC.name) - iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name) - iface['interface'] = NIC.interface - iface['mac'] = NIC.mac.address - vminfo['ifaces'].append(iface) - vminfo[str(NIC.name)] = NIC.mac.address - CLUSTER = self.conn.get_cluster_byid(VM.cluster.id) - if CLUSTER: - vminfo['cluster'] = CLUSTER.name - else: - vminfo = False - return vminfo - - def createVMimage(self, name, cluster, template, disks): - self.__get_conn() - return self.conn.createVMimage(name, cluster, template, disks) - - def createVM(self, name, cluster, os, actiontype): - self.__get_conn() - return self.conn.createVM(name, cluster, os, actiontype) - - def setMemory(self, name, memory): - self.__get_conn() - return self.conn.set_Memory(name, memory) - - def setMemoryPolicy(self, name, memory_policy): - self.__get_conn() - return self.conn.set_Memory_Policy(name, memory_policy) - - def setCPU(self, name, cpu): - self.__get_conn() - return self.conn.set_CPU(name, cpu) - - def setCPUShare(self, name, cpu_share): - self.__get_conn() - return self.conn.set_CPU_share(name, cpu_share) - - def setDisks(self, name, disks): - self.__get_conn() - counter = 0 - bootselect = False - for disk in disks: - if 'bootable' in disk: - if disk['bootable'] is True: - bootselect = True - - for disk in disks: - diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_') - disksize = disk.get('size', 1) - diskdomain = disk.get('domain', None) - if diskdomain is None: - setMsg("`domain` is a required disk key.") - setFailed() - return False - diskinterface = disk.get('interface', 'virtio') - diskformat = disk.get('format', 'raw') - diskallocationtype = disk.get('thin', False) - diskboot = disk.get('bootable', False) - - if bootselect is False and counter == 0: - diskboot = True - - DISK = self.conn.get_disk(diskname) - - if DISK is None: - self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot) - else: - self.conn.set_Disk(diskname, disksize, diskinterface, diskboot) - checkFail() - counter += 1 - - return True - - def setNetworks(self, vmname, ifaces): - self.__get_conn() - VM = self.conn.get_VM(vmname) - - counter = 0 - length = len(ifaces) - - for NIC in VM.nics.list(): - if counter < length: - iface = ifaces[counter] - name = iface.get('name', None) - if name is None: - setMsg("`name` is a required iface key.") - setFailed() - elif str(name) != str(NIC.name): - setMsg("ifaces are in the wrong order, rebuilding everything.") - for NIC in VM.nics.list(): 
- self.conn.del_NIC(vmname, NIC.name) - self.setNetworks(vmname, ifaces) - checkFail() - return True - vlan = iface.get('vlan', None) - if vlan is None: - setMsg("`vlan` is a required iface key.") - setFailed() - checkFail() - interface = iface.get('interface', 'virtio') - self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface) - else: - self.conn.del_NIC(vmname, NIC.name) - counter += 1 - checkFail() - - while counter < length: - iface = ifaces[counter] - name = iface.get('name', None) - if name is None: - setMsg("`name` is a required iface key.") - setFailed() - vlan = iface.get('vlan', None) - if vlan is None: - setMsg("`vlan` is a required iface key.") - setFailed() - if failed is True: - return False - interface = iface.get('interface', 'virtio') - self.conn.createNIC(vmname, name, vlan, interface) - - counter += 1 - checkFail() - return True - - def setDeleteProtection(self, vmname, del_prot): - self.__get_conn() - VM = self.conn.get_VM(vmname) - if bool(VM.delete_protected) != bool(del_prot): - self.conn.set_DeleteProtection(vmname, del_prot) - checkFail() - setMsg("`delete protection` has been updated.") - else: - setMsg("`delete protection` already has the right value.") - return True - - def setBootOrder(self, vmname, boot_order): - self.__get_conn() - VM = self.conn.get_VM(vmname) - bootorder = [] - for boot_dev in VM.os.get_boot(): - bootorder.append(str(boot_dev.dev)) - - if boot_order != bootorder: - self.conn.set_BootOrder(vmname, boot_order) - setMsg('The boot order has been set') - else: - setMsg('The boot order has already been set') - return True - - def removeVM(self, vmname): - self.__get_conn() - self.setPower(vmname, "down", 300) - return self.conn.remove_VM(vmname) - - def setPower(self, vmname, state, timeout): - self.__get_conn() - VM = self.conn.get_VM(vmname) - if VM is None: - setMsg("VM does not exist.") - setFailed() - return False - - if state == VM.status.state: - setMsg("VM state was already " + state) - else: - if state == "up": - setMsg("VM is going to start") - self.conn.start_VM(vmname, timeout) - setChanged() - elif state == "down": - setMsg("VM is going to stop") - self.conn.stop_VM(vmname, timeout) - setChanged() - elif state == "restarted": - self.setPower(vmname, "down", timeout) - checkFail() - self.setPower(vmname, "up", timeout) - checkFail() - setMsg("the vm state is set to " + state) - return True - - def setCD(self, vmname, cd_drive): - self.__get_conn() - if cd_drive: - return self.conn.set_CD(vmname, cd_drive) - else: - return self.conn.remove_CD(vmname) - - def setVMHost(self, vmname, vmhost): - self.__get_conn() - return self.conn.set_VM_Host(vmname, vmhost) - - def setHost(self, hostname, cluster, ifaces): - self.__get_conn() - return self.conn.set_Host(hostname, cluster, ifaces) - - -def checkFail(): - if failed: - module.fail_json(msg=msg) - else: - return True - - -def setFailed(): - global failed - failed = True - - -def setChanged(): - global changed - changed = True - - -def setMsg(message): - global failed - msg.append(message) - - -def core(module): - - r = RHEV(module) - - state = module.params.get('state') - - if state == 'ping': - r.test() - return RHEV_SUCCESS, {"ping": "pong"} - elif state == 'info': - name = module.params.get('name') - if not name: - setMsg("`name` is a required argument.") - return RHEV_FAILED, msg - vminfo = r.getVM(name) - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - elif state == 'present': - created = False - name = module.params.get('name') - if not name: - 
setMsg("`name` is a required argument.") - return RHEV_FAILED, msg - actiontype = module.params.get('type') - if actiontype == 'server' or actiontype == 'desktop': - vminfo = r.getVM(name) - if vminfo: - setMsg('VM exists') - else: - # Create VM - cluster = module.params.get('cluster') - if cluster is None: - setMsg("cluster is a required argument.") - setFailed() - template = module.params.get('image') - if template: - disks = module.params.get('disks') - if disks is None: - setMsg("disks is a required argument.") - setFailed() - checkFail() - if r.createVMimage(name, cluster, template, disks) is False: - return RHEV_FAILED, vminfo - else: - os = module.params.get('osver') - if os is None: - setMsg("osver is a required argument.") - setFailed() - checkFail() - if r.createVM(name, cluster, os, actiontype) is False: - return RHEV_FAILED, vminfo - created = True - - # Set MEMORY and MEMORY POLICY - vminfo = r.getVM(name) - memory = module.params.get('vmmem') - if memory is not None: - memory_policy = module.params.get('mempol') - if memory_policy == 0: - memory_policy = memory - mem_pol_nok = True - if int(vminfo['mem_pol']) == memory_policy: - setMsg("Memory is correct") - mem_pol_nok = False - - mem_nok = True - if int(vminfo['memory']) == memory: - setMsg("Memory is correct") - mem_nok = False - - if memory_policy > memory: - setMsg('memory_policy cannot have a higher value than memory.') - return RHEV_FAILED, msg - - if mem_nok and mem_pol_nok: - if memory_policy > int(vminfo['memory']): - r.setMemory(vminfo['name'], memory) - r.setMemoryPolicy(vminfo['name'], memory_policy) - else: - r.setMemoryPolicy(vminfo['name'], memory_policy) - r.setMemory(vminfo['name'], memory) - elif mem_nok: - r.setMemory(vminfo['name'], memory) - elif mem_pol_nok: - r.setMemoryPolicy(vminfo['name'], memory_policy) - checkFail() - - # Set CPU - cpu = module.params.get('vmcpu') - if int(vminfo['cpu_cores']) == cpu: - setMsg("Number of CPUs is correct") - else: - if r.setCPU(vminfo['name'], cpu) is False: - return RHEV_FAILED, msg - - # Set CPU SHARE - cpu_share = module.params.get('cpu_share') - if cpu_share is not None: - if int(vminfo['cpu_shares']) == cpu_share: - setMsg("CPU share is correct.") - else: - if r.setCPUShare(vminfo['name'], cpu_share) is False: - return RHEV_FAILED, msg - - # Set DISKS - disks = module.params.get('disks') - if disks is not None: - if r.setDisks(vminfo['name'], disks) is False: - return RHEV_FAILED, msg - - # Set NETWORKS - ifaces = module.params.get('ifaces', None) - if ifaces is not None: - if r.setNetworks(vminfo['name'], ifaces) is False: - return RHEV_FAILED, msg - - # Set Delete Protection - del_prot = module.params.get('del_prot') - if r.setDeleteProtection(vminfo['name'], del_prot) is False: - return RHEV_FAILED, msg - - # Set Boot Order - boot_order = module.params.get('boot_order') - if r.setBootOrder(vminfo['name'], boot_order) is False: - return RHEV_FAILED, msg - - # Set VM Host - vmhost = module.params.get('vmhost') - if vmhost: - if r.setVMHost(vminfo['name'], vmhost) is False: - return RHEV_FAILED, msg - - vminfo = r.getVM(name) - vminfo['created'] = created - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - - if actiontype == 'host': - cluster = module.params.get('cluster') - if cluster is None: - setMsg("cluster is a required argument.") - setFailed() - ifaces = module.params.get('ifaces') - if ifaces is None: - setMsg("ifaces is a required argument.") - setFailed() - if r.setHost(name, cluster, ifaces) is False: - return RHEV_FAILED, msg - 
return RHEV_SUCCESS, {'changed': changed, 'msg': msg} - - elif state == 'absent': - name = module.params.get('name') - if not name: - setMsg("`name` is a required argument.") - return RHEV_FAILED, msg - actiontype = module.params.get('type') - if actiontype == 'server' or actiontype == 'desktop': - vminfo = r.getVM(name) - if vminfo: - setMsg('VM exists') - - # Set Delete Protection - del_prot = module.params.get('del_prot') - if r.setDeleteProtection(vminfo['name'], del_prot) is False: - return RHEV_FAILED, msg - - # Remove VM - if r.removeVM(vminfo['name']) is False: - return RHEV_FAILED, msg - setMsg('VM has been removed.') - vminfo['state'] = 'DELETED' - else: - setMsg('VM was already removed.') - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - - elif state == 'up' or state == 'down' or state == 'restarted': - name = module.params.get('name') - if not name: - setMsg("`name` is a required argument.") - return RHEV_FAILED, msg - timeout = module.params.get('timeout') - if r.setPower(name, state, timeout) is False: - return RHEV_FAILED, msg - vminfo = r.getVM(name) - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - - elif state == 'cd': - name = module.params.get('name') - cd_drive = module.params.get('cd_drive') - if r.setCD(name, cd_drive) is False: - return RHEV_FAILED, msg - return RHEV_SUCCESS, {'changed': changed, 'msg': msg} - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']), - user=dict(type='str', default='admin@internal'), - password=dict(type='str', required=True, no_log=True), - server=dict(type='str', default='127.0.0.1'), - port=dict(type='int', default=443), - insecure_api=dict(type='bool', default=False), - name=dict(type='str'), - image=dict(type='str'), - datacenter=dict(type='str', default="Default"), - type=dict(type='str', default='server', choices=['desktop', 'host', 'server']), - cluster=dict(type='str', default=''), - vmhost=dict(type='str'), - vmcpu=dict(type='int', default=2), - vmmem=dict(type='int', default=1), - disks=dict(type='list', elements='str'), - osver=dict(type='str', default="rhel_6x64"), - ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']), - timeout=dict(type='int'), - mempol=dict(type='int', default=1), - vm_ha=dict(type='bool', default=True), - cpu_share=dict(type='int', default=0), - boot_order=dict(type='list', elements='str', default=['hd', 'network']), - del_prot=dict(type='bool', default=True), - cd_drive=dict(type='str'), - ), - ) - - if not HAS_SDK: - module.fail_json(msg="The 'ovirtsdk' module is not importable. 
Check the requirements.") - - rc = RHEV_SUCCESS - try: - rc, result = core(module) - except Exception as e: - module.fail_json(msg=str(e)) - - if rc != 0: # something went wrong emit the msg - module.fail_json(rc=rc, msg=result) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py b/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py deleted file mode 100644 index 878621c3..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Ryan Scott Brown -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: serverless -short_description: Manages a Serverless Framework project -description: - - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks. -options: - state: - description: - - Goal state of given stage/project. - type: str - choices: [ absent, present ] - default: present - serverless_bin_path: - description: - - The path of a serverless framework binary relative to the 'service_path' eg. node_module/.bin/serverless - type: path - service_path: - description: - - The path to the root of the Serverless Service to be operated on. - type: path - required: true - stage: - description: - - The name of the serverless framework project stage to deploy to. - - This uses the serverless framework default "dev". - type: str - functions: - description: - - A list of specific functions to deploy. - - If this is not provided, all functions in the service will be deployed. - - Deprecated parameter, it will be removed in community.general 5.0.0. - type: list - elements: str - default: [] - region: - description: - - AWS region to deploy the service to. - - This parameter defaults to C(us-east-1). - type: str - deploy: - description: - - Whether or not to deploy artifacts after building them. - - When this option is C(false) all the functions will be built, but no stack update will be run to send them out. - - This is mostly useful for generating artifacts to be stored/deployed elsewhere. - type: bool - default: yes - force: - description: - - Whether or not to force full deployment, equivalent to serverless C(--force) option. - type: bool - default: no - verbose: - description: - - Shows all stack events during deployment, and display any Stack Output. - type: bool - default: no -notes: - - Currently, the C(serverless) command must be in the path of the node executing the task. - In the future this may be a flag. 
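The serverless module is a thin wrapper that assembles a serverless CLI invocation from these options; the assembly appears in main() further down this hunk. A hedged sketch of that mapping, using only the flag names found in the module source (not any particular serverless release), with build_serverless_command as an illustrative name:

# Minimal sketch mirroring how the deleted module maps parameters to flags.
def build_serverless_command(state, region="", stage="",
                             deploy=True, force=False, verbose=False,
                             binary="serverless"):
    cmd = [binary, "deploy" if state == "present" else "remove"]
    if state == "present":
        if not deploy:
            cmd.append("--noDeploy")   # build artifacts, skip stack update
        elif force:
            cmd.append("--force")
    if region:
        cmd += ["--region", region]
    if stage:
        cmd += ["--stage", stage]
    if verbose:
        cmd.append("--verbose")
    return cmd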
-requirements: -- serverless -- yaml -author: -- Ryan Scott Brown (@ryansb) -''' - -EXAMPLES = r''' -- name: Basic deploy of a service - community.general.serverless: - service_path: '{{ project_dir }}' - state: present - -- name: Deploy a project, then pull its resource list back into Ansible - community.general.serverless: - stage: dev - region: us-east-1 - service_path: '{{ project_dir }}' - register: sls - -# The cloudformation stack is always named the same as the full service, so the -# cloudformation_info module can get a full list of the stack resources, as -# well as stack events and outputs -- cloudformation_info: - region: us-east-1 - stack_name: '{{ sls.service_name }}' - stack_resources: true - -- name: Deploy a project using a locally installed serverless binary - community.general.serverless: - stage: dev - region: us-east-1 - service_path: '{{ project_dir }}' - serverless_bin_path: node_modules/.bin/serverless -''' - -RETURN = r''' -service_name: - type: str - description: The service name specified in the serverless.yml that was just deployed. - returned: always - sample: my-fancy-service-dev -state: - type: str - description: Whether the stack for the serverless project is present/absent. - returned: always -command: - type: str - description: Full `serverless` command run by this module, in case you want to re-run the command outside the module. - returned: always - sample: serverless deploy --stage production -''' - -import os - -try: - import yaml - HAS_YAML = True -except ImportError: - HAS_YAML = False - -from ansible.module_utils.basic import AnsibleModule - - -def read_serverless_config(module): - path = module.params.get('service_path') - full_path = os.path.join(path, 'serverless.yml') - - try: - with open(full_path) as sls_config: - config = yaml.safe_load(sls_config.read()) - return config - except IOError as e: - module.fail_json(msg="Could not open serverless.yml in {0}. 
err: {1}".format(full_path, str(e))) - - -def get_service_name(module, stage): - config = read_serverless_config(module) - if config.get('service') is None: - module.fail_json(msg="Could not read `service` key from serverless.yml file") - - if stage: - return "{0}-{1}".format(config['service'], stage) - - return "{0}-{1}".format(config['service'], config.get('stage', 'dev')) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - service_path=dict(type='path', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - functions=dict(type='list', elements='str', - removed_in_version="5.0.0", removed_from_collection="community.general"), - region=dict(type='str', default=''), - stage=dict(type='str', default=''), - deploy=dict(type='bool', default=True), - serverless_bin_path=dict(type='path'), - force=dict(type='bool', default=False), - verbose=dict(type='bool', default=False), - ), - ) - - if not HAS_YAML: - module.fail_json(msg='yaml is required for this module') - - service_path = module.params.get('service_path') - state = module.params.get('state') - region = module.params.get('region') - stage = module.params.get('stage') - deploy = module.params.get('deploy', True) - force = module.params.get('force', False) - verbose = module.params.get('verbose', False) - serverless_bin_path = module.params.get('serverless_bin_path') - - if serverless_bin_path is not None: - command = serverless_bin_path + " " - else: - command = module.get_bin_path("serverless") + " " - - if state == 'present': - command += 'deploy ' - elif state == 'absent': - command += 'remove ' - else: - module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state)) - - if state == 'present': - if not deploy: - command += '--noDeploy ' - elif force: - command += '--force ' - - if region: - command += '--region {0} '.format(region) - if stage: - command += '--stage {0} '.format(stage) - if verbose: - command += '--verbose ' - - rc, out, err = module.run_command(command, cwd=service_path) - if rc != 0: - if state == 'absent' and "-{0}' does not exist".format(stage) in out: - module.exit_json(changed=False, state='absent', command=command, - out=out, service_name=get_service_name(module, stage)) - - module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err)) - - # gather some facts about the deployment - module.exit_json(changed=True, state='present', out=out, command=command, - service_name=get_service_name(module, stage)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py b/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py deleted file mode 100644 index 8eca14e7..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py +++ /dev/null @@ -1,521 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Ryan Scott Brown -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: terraform -short_description: Manages a Terraform deployment (and plans) -description: - - Provides support for deploying resources with Terraform and pulling - resource information back into Ansible. 
-options: - state: - choices: ['planned', 'present', 'absent'] - description: - - Goal state of given stage/project - type: str - default: present - binary_path: - description: - - The path of a terraform binary to use, relative to the 'service_path' - unless you supply an absolute path. - type: path - project_path: - description: - - The path to the root of the Terraform directory with the - vars.tf/main.tf/etc to use. - type: path - required: true - plugin_paths: - description: - - List of paths containing Terraform plugin executable files. - - Plugin executables can be downloaded from U(https://releases.hashicorp.com/). - - When set, the plugin discovery and auto-download behavior of Terraform is disabled. - - The directory structure in the plugin path can be tricky. The Terraform docs - U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins) - show a simple directory of files, but actually, the directory structure - has to follow the same structure you would see if Terraform auto-downloaded the plugins. - See the examples below for a tree output of an example plugin directory. - type: list - elements: path - version_added: 3.0.0 - workspace: - description: - - The terraform workspace to work with. - type: str - default: default - purge_workspace: - description: - - Only works with state = absent - - If true, the workspace will be deleted after the "terraform destroy" action. - - The 'default' workspace will not be deleted. - default: false - type: bool - plan_file: - description: - - The path to an existing Terraform plan file to apply. If this is not - specified, Ansible will build a new TF plan and execute it. - Note that this option is required if 'state' has the 'planned' value. - type: path - state_file: - description: - - The path to an existing Terraform state file to use when building plan. - If this is not specified, the default `terraform.tfstate` will be used. - - This option is ignored when plan is specified. - type: path - variables_files: - description: - - The path to a variables file for Terraform to fill into the TF - configurations. This can accept a list of paths to multiple variables files. - - Up until Ansible 2.9, this option was usable as I(variables_file). - type: list - elements: path - aliases: [ 'variables_file' ] - variables: - description: - - A group of key-values to override template variables or those in - variables files. - type: dict - targets: - description: - - A list of specific resources to target in this plan/application. The - resources selected here will also auto-include any dependencies. - type: list - elements: str - lock: - description: - - Enable statefile locking, if you use a service that accepts locks (such - as S3+DynamoDB) to store your statefile. - type: bool - default: true - lock_timeout: - description: - - How long to maintain the lock on the statefile, if you use a service - that accepts locks (such as S3+DynamoDB). - type: int - force_init: - description: - - To avoid duplicating infra, if a state file can't be found this will - force a `terraform init`. Generally, this should be turned off unless - you intend to provision an entirely new Terraform deployment. - default: false - type: bool - overwrite_init: - description: - - Run init even if C(.terraform/terraform.tfstate) already exists in I(project_path). - default: true - type: bool - version_added: '3.2.0' - backend_config: - description: - - A group of key-values to provide at init stage to the -backend-config parameter. 
- type: dict - backend_config_files: - description: - - The path to a configuration file to provide at init state to the -backend-config parameter. - This can accept a list of paths to multiple configuration files. - type: list - elements: path - version_added: '0.2.0' - init_reconfigure: - description: - - Forces backend reconfiguration during init. - default: false - type: bool - version_added: '1.3.0' - check_destroy: - description: - - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, - but not "destroy and re-create" actions. This option is ignored when I(state=absent). - type: bool - default: false - version_added: '3.3.0' - parallelism: - description: - - Restrict concurrent operations when Terraform applies the plan. - type: int - version_added: '3.8.0' -notes: - - To just run a `terraform plan`, use check mode. -requirements: [ "terraform" ] -author: "Ryan Scott Brown (@ryansb)" -''' - -EXAMPLES = """ -- name: Basic deploy of a service - community.general.terraform: - project_path: '{{ project_dir }}' - state: present - -- name: Define the backend configuration at init - community.general.terraform: - project_path: 'project/' - state: "{{ state }}" - force_init: true - backend_config: - region: "eu-west-1" - bucket: "some-bucket" - key: "random.tfstate" - -- name: Define the backend configuration with one or more files at init - community.general.terraform: - project_path: 'project/' - state: "{{ state }}" - force_init: true - backend_config_files: - - /path/to/backend_config_file_1 - - /path/to/backend_config_file_2 - -- name: Disable plugin discovery and auto-download by setting plugin_paths - community.general.terraform: - project_path: 'project/' - state: "{{ state }}" - force_init: true - plugin_paths: - - /path/to/plugins_dir_1 - - /path/to/plugins_dir_2 - -### Example directory structure for plugin_paths example -# $ tree /path/to/plugins_dir_1 -# /path/to/plugins_dir_1/ -# └── registry.terraform.io -# └── hashicorp -# └── vsphere -# ├── 1.24.0 -# │ └── linux_amd64 -# │ └── terraform-provider-vsphere_v1.24.0_x4 -# └── 1.26.0 -# └── linux_amd64 -# └── terraform-provider-vsphere_v1.26.0_x4 -""" - -RETURN = """ -outputs: - type: complex - description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value. - returned: on success - sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}' - contains: - sensitive: - type: bool - returned: always - description: Whether Terraform has marked this value as sensitive - type: - type: str - returned: always - description: The type of the value (string, int, etc) - value: - type: str - returned: always - description: The value of the output as interpolated by Terraform -stdout: - type: str - description: Full `terraform` command stdout, in case you want to display it or examine the event log - returned: always - sample: '' -command: - type: str - description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem. - returned: always - sample: terraform apply ... 
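The outputs value documented above is the parsed result of terraform output -json, which main() gathers after a successful apply. A small sketch of that step, assuming a terraform binary on PATH (read_outputs is an illustrative name):

import json
import subprocess

def read_outputs(project_path):
    # the real module tolerates a non-zero exit here (it usually just
    # means no outputs are defined); this sketch lets the error propagate
    out = subprocess.check_output(
        ["terraform", "output", "-no-color", "-json"], cwd=project_path)
    outputs = json.loads(out)
    # e.g. outputs["bukkit_arn"]["value"] -> "arn:aws:s3:::tf-test-bukkit"
    return outputs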
-""" - -import os -import json -import tempfile -from ansible.module_utils.six.moves import shlex_quote - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -module = None - - -def get_version(bin_path): - extract_version = module.run_command([bin_path, 'version', '-json']) - terraform_version = (json.loads(extract_version[1]))['terraform_version'] - return terraform_version - - -def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None): - if project_path is None or '/' not in project_path: - module.fail_json(msg="Path for Terraform project can not be None or ''.") - if not os.path.exists(bin_path): - module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path)) - if not os.path.isdir(project_path): - module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path)) - if LooseVersion(version) < LooseVersion('0.15.0'): - rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path) - else: - rc, out, err = module.run_command([bin_path, 'validate'], check_rc=True, cwd=project_path) - - -def _state_args(state_file): - if state_file and os.path.exists(state_file): - return ['-state', state_file] - if state_file and not os.path.exists(state_file): - module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file)) - return [] - - -def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths): - command = [bin_path, 'init', '-input=false'] - if backend_config: - for key, val in backend_config.items(): - command.extend([ - '-backend-config', - shlex_quote('{0}={1}'.format(key, val)) - ]) - if backend_config_files: - for f in backend_config_files: - command.extend(['-backend-config', f]) - if init_reconfigure: - command.extend(['-reconfigure']) - if plugin_paths: - for plugin_path in plugin_paths: - command.extend(['-plugin-dir', plugin_path]) - rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) - - -def get_workspace_context(bin_path, project_path): - workspace_ctx = {"current": "default", "all": []} - command = [bin_path, 'workspace', 'list', '-no-color'] - rc, out, err = module.run_command(command, cwd=project_path) - if rc != 0: - module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err)) - for item in out.split('\n'): - stripped_item = item.strip() - if not stripped_item: - continue - elif stripped_item.startswith('* '): - workspace_ctx["current"] = stripped_item.replace('* ', '') - else: - workspace_ctx["all"].append(stripped_item) - return workspace_ctx - - -def _workspace_cmd(bin_path, project_path, action, workspace): - command = [bin_path, 'workspace', action, workspace, '-no-color'] - rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) - return rc, out, err - - -def create_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'new', workspace) - - -def select_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'select', workspace) - - -def remove_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'delete', workspace) - - -def build_plan(command, project_path, variables_args, state_file, targets, state, 
apply_args, plan_path=None): - if plan_path is None: - f, plan_path = tempfile.mkstemp(suffix='.tfplan') - - local_command = command.copy() - - plan_command = [command[0], 'plan'] - - if state == "planned": - for c in local_command[1:]: - plan_command.append(c) - - if state == "present": - for a in apply_args: - local_command.remove(a) - for c in local_command[1:]: - plan_command.append(c) - - plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]) - - for t in targets: - plan_command.extend(['-target', t]) - - plan_command.extend(_state_args(state_file)) - - rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path) - - if rc == 0: - # no changes - return plan_path, False, out, err, plan_command if state == 'planned' else command - elif rc == 1: - # failure to plan - module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err)) - elif rc == 2: - # changes, but successful - return plan_path, True, out, err, plan_command if state == 'planned' else command - - module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err)) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - project_path=dict(required=True, type='path'), - binary_path=dict(type='path'), - plugin_paths=dict(type='list', elements='path'), - workspace=dict(type='str', default='default'), - purge_workspace=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent', 'planned']), - variables=dict(type='dict'), - variables_files=dict(aliases=['variables_file'], type='list', elements='path'), - plan_file=dict(type='path'), - state_file=dict(type='path'), - targets=dict(type='list', elements='str', default=[]), - lock=dict(type='bool', default=True), - lock_timeout=dict(type='int',), - force_init=dict(type='bool', default=False), - backend_config=dict(type='dict'), - backend_config_files=dict(type='list', elements='path'), - init_reconfigure=dict(type='bool', default=False), - overwrite_init=dict(type='bool', default=True), - check_destroy=dict(type='bool', default=False), - parallelism=dict(type='int'), - ), - required_if=[('state', 'planned', ['plan_file'])], - supports_check_mode=True, - ) - - project_path = module.params.get('project_path') - bin_path = module.params.get('binary_path') - plugin_paths = module.params.get('plugin_paths') - workspace = module.params.get('workspace') - purge_workspace = module.params.get('purge_workspace') - state = module.params.get('state') - variables = module.params.get('variables') or {} - variables_files = module.params.get('variables_files') - plan_file = module.params.get('plan_file') - state_file = module.params.get('state_file') - force_init = module.params.get('force_init') - backend_config = module.params.get('backend_config') - backend_config_files = module.params.get('backend_config_files') - init_reconfigure = module.params.get('init_reconfigure') - overwrite_init = module.params.get('overwrite_init') - check_destroy = module.params.get('check_destroy') - - if bin_path is not None: - command = [bin_path] - else: - command = [module.get_bin_path('terraform', required=True)] - - checked_version = get_version(command[0]) - - if LooseVersion(checked_version) < LooseVersion('0.15.0'): - DESTROY_ARGS = ('destroy', '-no-color', '-force') - APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') - else: - DESTROY_ARGS = ('destroy', 
'-no-color', '-auto-approve') - APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') - - if force_init: - if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")): - init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths) - - workspace_ctx = get_workspace_context(command[0], project_path) - if workspace_ctx["current"] != workspace: - if workspace not in workspace_ctx["all"]: - create_workspace(command[0], project_path, workspace) - else: - select_workspace(command[0], project_path, workspace) - - if state == 'present': - command.extend(APPLY_ARGS) - elif state == 'absent': - command.extend(DESTROY_ARGS) - - if state == 'present' and module.params.get('parallelism') is not None: - command.append('-parallelism=%d' % module.params.get('parallelism')) - - variables_args = [] - for k, v in variables.items(): - variables_args.extend([ - '-var', - '{0}={1}'.format(k, v) - ]) - if variables_files: - for f in variables_files: - variables_args.extend(['-var-file', f]) - - preflight_validation(command[0], project_path, checked_version, variables_args) - - if module.params.get('lock') is not None: - if module.params.get('lock'): - command.append('-lock=true') - else: - command.append('-lock=false') - if module.params.get('lock_timeout') is not None: - command.append('-lock-timeout=%ds' % module.params.get('lock_timeout')) - - for t in (module.params.get('targets') or []): - command.extend(['-target', t]) - - # we aren't sure if this plan will result in changes, so assume yes - needs_application, changed = True, False - - out, err = '', '' - - if state == 'absent': - command.extend(variables_args) - elif state == 'present' and plan_file: - if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]): - command.append(plan_file) - else: - module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file)) - else: - plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, - module.params.get('targets'), state, APPLY_ARGS, plan_file) - if state == 'present' and check_destroy and '- destroy' in out: - module.fail_json(msg="Aborting command because it would destroy some resources. " - "Consider switching the 'check_destroy' to false to suppress this error") - command.append(plan_file) - - if needs_application and not module.check_mode and state != 'planned': - rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) - if rc != 0: - if workspace_ctx["current"] != workspace: - select_workspace(command[0], project_path, workspace_ctx["current"]) - module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, - stdout_lines=out.splitlines(), stderr=err, - stderr_lines=err.splitlines(), - cmd=' '.join(command)) - # checks out to decide if changes were made during execution - if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out: - changed = True - - outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file) - rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path) - if rc == 1: - module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err)) - outputs = {} - elif rc != 0: - module.fail_json( - msg="Failure when getting Terraform outputs. 
" - "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err), - command=' '.join(outputs_command)) - else: - outputs = json.loads(outputs_text) - - # Restore the Terraform workspace found when running the module - if workspace_ctx["current"] != workspace: - select_workspace(command[0], project_path, workspace_ctx["current"]) - if state == 'absent' and workspace != 'default' and purge_workspace is True: - remove_workspace(command[0], project_path, workspace) - - module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py b/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py deleted file mode 100644 index f65e3c9a..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py +++ /dev/null @@ -1,205 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: xenserver_facts -short_description: get facts reported on xenserver -description: - - Reads data out of XenAPI, can be used instead of multiple xe commands. -author: - - Andy Hill (@andyhky) - - Tim Rupp (@caphrim007) - - Robin Lee (@cheese) -options: {} -''' - -EXAMPLES = ''' -- name: Gather facts from xenserver - community.general.xenserver_facts: - -- name: Print running VMs - ansible.builtin.debug: - msg: "{{ item }}" - with_items: "{{ xs_vms.keys() }}" - when: xs_vms[item]['power_state'] == "Running" - -# Which will print: -# -# TASK: [Print running VMs] *********************************************************** -# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit)) -# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => { -# "item": "Control domain on host: 10.0.13.22", -# "msg": "Control domain on host: 10.0.13.22" -# } -''' - - -HAVE_XENAPI = False -try: - import XenAPI - HAVE_XENAPI = True -except ImportError: - pass - -from ansible.module_utils import distro -from ansible.module_utils.basic import AnsibleModule - - -class XenServerFacts: - def __init__(self): - self.codes = { - '5.5.0': 'george', - '5.6.100': 'oxford', - '6.0.0': 'boston', - '6.1.0': 'tampa', - '6.2.0': 'clearwater' - } - - @property - def version(self): - result = distro.linux_distribution()[1] - return result - - @property - def codename(self): - if self.version in self.codes: - result = self.codes[self.version] - else: - result = None - - return result - - -def get_xenapi_session(): - session = XenAPI.xapi_local() - session.xenapi.login_with_password('', '') - return session - - -def get_networks(session): - recs = session.xenapi.network.get_all_records() - networks = change_keys(recs, key='name_label') - return networks - - -def get_pifs(session): - recs = session.xenapi.PIF.get_all_records() - pifs = change_keys(recs, key='uuid') - xs_pifs = {} - devicenums = range(0, 7) - for pif in pifs.values(): - for eth in devicenums: - interface_name = "eth%s" % (eth) - bond_name = interface_name.replace('eth', 'bond') - if pif['device'] == interface_name: - xs_pifs[interface_name] = pif - elif pif['device'] == bond_name: - xs_pifs[bond_name] = pif - return xs_pifs - - -def get_vlans(session): - recs = 
session.xenapi.VLAN.get_all_records() - return change_keys(recs, key='tag') - - -def change_keys(recs, key='uuid', filter_func=None): - """ - Take a xapi dict, and make the keys the value of recs[ref][key]. - - Preserves the ref in rec['ref'] - - """ - new_recs = {} - - for ref, rec in recs.items(): - if filter_func is not None and not filter_func(rec): - continue - - for param_name, param_value in rec.items(): - # param_value may be of type xmlrpc.client.DateTime, - # which is not simply convertable to str. - # Use 'value' attr to get the str value, - # following an example in xmlrpc.client.DateTime document - if hasattr(param_value, "value"): - rec[param_name] = param_value.value - new_recs[rec[key]] = rec - new_recs[rec[key]]['ref'] = ref - - return new_recs - - -def get_host(session): - """Get the host""" - host_recs = session.xenapi.host.get_all() - # We only have one host, so just return its entry - return session.xenapi.host.get_record(host_recs[0]) - - -def get_vms(session): - recs = session.xenapi.VM.get_all_records() - if not recs: - return None - vms = change_keys(recs, key='name_label') - return vms - - -def get_srs(session): - recs = session.xenapi.SR.get_all_records() - if not recs: - return None - srs = change_keys(recs, key='name_label') - return srs - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - ) - - if not HAVE_XENAPI: - module.fail_json(changed=False, msg="python xen api required for this module") - - obj = XenServerFacts() - try: - session = get_xenapi_session() - except XenAPI.Failure as e: - module.fail_json(msg='%s' % e) - - data = { - 'xenserver_version': obj.version, - 'xenserver_codename': obj.codename - } - - xs_networks = get_networks(session) - xs_pifs = get_pifs(session) - xs_vlans = get_vlans(session) - xs_vms = get_vms(session) - xs_srs = get_srs(session) - - if xs_vlans: - data['xs_vlans'] = xs_vlans - if xs_pifs: - data['xs_pifs'] = xs_pifs - if xs_networks: - data['xs_networks'] = xs_networks - - if xs_vms: - data['xs_vms'] = xs_vms - - if xs_srs: - data['xs_srs'] = xs_srs - - module.exit_json(ansible_facts=data) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py deleted file mode 100644 index d46ce388..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py +++ /dev/null @@ -1,579 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_firewall_policy -short_description: Configure 1&1 firewall policy. -description: - - Create, remove, reconfigure, update firewall policies. 
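The change_keys() helper in the xenserver_facts module above implements the re-keying idiom all of its get_* functions share: xapi returns {opaque_ref: record} mappings, and the module re-indexes them by a friendlier field (name_label, uuid, tag) while preserving the ref for follow-up API calls. A standalone sketch of the idiom (omitting the filter_func and DateTime normalization the real helper also performs):

def rekey(records, key="uuid"):
    by_key = {}
    for ref, rec in records.items():
        rec = dict(rec, ref=ref)   # keep the opaque ref for later API calls
        by_key[rec[key]] = rec
    return by_key

# rekey({"OpaqueRef:1": {"uuid": "abc", "power_state": "Running"}})
# -> {"abc": {"uuid": "abc", "power_state": "Running", "ref": "OpaqueRef:1"}}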
- This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a firewall policy state to create, remove, or update. - required: false - type: str - default: 'present' - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - name: - description: - - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state. - maxLength=128 - type: str - firewall_policy: - description: - - The identifier (id or name) of the firewall policy used with update state. - type: str - rules: - description: - - A list of rules that will be set for the firewall policy. - Each rule must contain protocol parameter, in addition to three optional parameters - (port_from, port_to, and source) - type: list - elements: dict - add_server_ips: - description: - - A list of server identifiers (id or name) to be assigned to a firewall policy. - Used in combination with update state. - type: list - elements: str - required: false - remove_server_ips: - description: - - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state. - type: list - elements: str - required: false - add_rules: - description: - - A list of rules that will be added to an existing firewall policy. - It is syntax is the same as the one used for rules parameter. Used in combination with update state. - type: list - elements: dict - required: false - remove_rules: - description: - - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state. - type: list - elements: str - required: false - description: - description: - - Firewall policy description. 
maxLength=256 - type: str - required: false - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -''' - -EXAMPLES = ''' -- name: Create a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - name: ansible-firewall-policy - description: Testing creation of firewall policies with ansible - rules: - - - protocol: TCP - port_from: 80 - port_to: 80 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - -- name: Destroy a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - state: absent - name: ansible-firewall-policy - -- name: Update a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - state: update - firewall_policy: ansible-firewall-policy - name: ansible-firewall-policy-updated - description: Testing creation of firewall policies with ansible - updated - -- name: Add server to a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - add_server_ips: - - server_identifier (id or name) - - server_identifier #2 (id or name) - wait: true - wait_timeout: 500 - state: update - -- name: Remove server from a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - remove_server_ips: - - B2504878540DBC5F7634EB00A07C1EBD (server's IP id) - wait: true - wait_timeout: 500 - state: update - -- name: Add rules to a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - description: Adding rules to an existing firewall policy - add_rules: - - - protocol: TCP - port_from: 70 - port_to: 70 - source: 0.0.0.0 - - - protocol: TCP - port_from: 60 - port_to: 60 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - state: update - -- name: Remove rules from a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - remove_rules: - - rule_id #1 - - rule_id #2 - - ... - wait: true - wait_timeout: 500 - state: update -''' - -RETURN = ''' -firewall_policy: - description: Information about the firewall policy that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_firewall_policy, - get_server, - OneAndOneResources, - wait_for_resource_creation_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): - """ - Assigns servers to a firewall policy. 
- """ - try: - attach_servers = [] - - for _server_id in server_ids: - server = get_server(oneandone_conn, _server_id, True) - attach_server = oneandone.client.AttachServer( - server_id=server['id'], - server_ip_id=next(iter(server['ips'] or []), None)['id'] - ) - attach_servers.append(attach_server) - - if module.check_mode: - if attach_servers: - return True - return False - - firewall_policy = oneandone_conn.attach_server_firewall_policy( - firewall_id=firewall_id, - server_ips=attach_servers) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id): - """ - Unassigns a server/IP from a firewall policy. - """ - try: - if module.check_mode: - firewall_server = oneandone_conn.get_firewall_server( - firewall_id=firewall_id, - server_ip_id=server_ip_id) - if firewall_server: - return True - return False - - firewall_policy = oneandone_conn.remove_firewall_server( - firewall_id=firewall_id, - server_ip_id=server_ip_id) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): - """ - Adds new rules to a firewall policy. - """ - try: - firewall_rules = [] - - for rule in rules: - firewall_rule = oneandone.client.FirewallPolicyRule( - protocol=rule['protocol'], - port_from=rule['port_from'], - port_to=rule['port_to'], - source=rule['source']) - firewall_rules.append(firewall_rule) - - if module.check_mode: - firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id) - if (firewall_rules and firewall_policy_id): - return True - return False - - firewall_policy = oneandone_conn.add_firewall_policy_rule( - firewall_id=firewall_id, - firewall_policy_rules=firewall_rules - ) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id): - """ - Removes a rule from a firewall policy. - """ - try: - if module.check_mode: - rule = oneandone_conn.get_firewall_policy_rule( - firewall_id=firewall_id, - rule_id=rule_id) - if rule: - return True - return False - - firewall_policy = oneandone_conn.remove_firewall_rule( - firewall_id=firewall_id, - rule_id=rule_id - ) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def update_firewall_policy(module, oneandone_conn): - """ - Updates a firewall policy based on input arguments. - Firewall rules and server ips can be added/removed to/from - firewall policy. Firewall policy name and description can be - updated as well. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - firewall_policy_id = module.params.get('firewall_policy') - name = module.params.get('name') - description = module.params.get('description') - add_server_ips = module.params.get('add_server_ips') - remove_server_ips = module.params.get('remove_server_ips') - add_rules = module.params.get('add_rules') - remove_rules = module.params.get('remove_rules') - - changed = False - - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True) - if firewall_policy is None: - _check_mode(module, False) - - if name or description: - _check_mode(module, True) - firewall_policy = oneandone_conn.modify_firewall( - firewall_id=firewall_policy['id'], - name=name, - description=description) - changed = True - - if add_server_ips: - if module.check_mode: - _check_mode(module, _add_server_ips(module, - oneandone_conn, - firewall_policy['id'], - add_server_ips)) - - firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips) - changed = True - - if remove_server_ips: - chk_changed = False - for server_ip_id in remove_server_ips: - if module.check_mode: - chk_changed |= _remove_firewall_server(module, - oneandone_conn, - firewall_policy['id'], - server_ip_id) - - _remove_firewall_server(module, - oneandone_conn, - firewall_policy['id'], - server_ip_id) - _check_mode(module, chk_changed) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) - changed = True - - if add_rules: - firewall_policy = _add_firewall_rules(module, - oneandone_conn, - firewall_policy['id'], - add_rules) - _check_mode(module, firewall_policy) - changed = True - - if remove_rules: - chk_changed = False - for rule_id in remove_rules: - if module.check_mode: - chk_changed |= _remove_firewall_rule(module, - oneandone_conn, - firewall_policy['id'], - rule_id) - - _remove_firewall_rule(module, - oneandone_conn, - firewall_policy['id'], - rule_id) - _check_mode(module, chk_changed) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) - changed = True - - return (changed, firewall_policy) - except Exception as e: - module.fail_json(msg=str(e)) - - -def create_firewall_policy(module, oneandone_conn): - """ - Create a new firewall policy. 
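Both the create path below and the update path above convert the module's rules dicts into SDK objects the same way. A minimal sketch of that conversion, assuming the oneandone.client SDK the module already imports (exact constructor signature per that SDK version):

import oneandone.client

def to_sdk_rules(rules):
    # one FirewallPolicyRule per dict from the module's rules parameter
    return [
        oneandone.client.FirewallPolicyRule(
            protocol=rule['protocol'],
            port_from=rule['port_from'],
            port_to=rule['port_to'],
            source=rule['source'])
        for rule in rules
    ]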
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get('name') - description = module.params.get('description') - rules = module.params.get('rules') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - firewall_rules = [] - - for rule in rules: - firewall_rule = oneandone.client.FirewallPolicyRule( - protocol=rule['protocol'], - port_from=rule['port_from'], - port_to=rule['port_to'], - source=rule['source']) - firewall_rules.append(firewall_rule) - - firewall_policy_obj = oneandone.client.FirewallPolicy( - name=name, - description=description - ) - - _check_mode(module, True) - firewall_policy = oneandone_conn.create_firewall_policy( - firewall_policy=firewall_policy_obj, - firewall_policy_rules=firewall_rules - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.firewall_policy, - firewall_policy['id'], - wait_timeout, - wait_interval) - - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh - changed = True if firewall_policy else False - - _check_mode(module, False) - - return (changed, firewall_policy) - except Exception as e: - module.fail_json(msg=str(e)) - - -def remove_firewall_policy(module, oneandone_conn): - """ - Removes a firewall policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - fp_id = module.params.get('name') - firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id) - if module.check_mode: - if firewall_policy_id is None: - _check_mode(module, False) - _check_mode(module, True) - firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id) - - changed = True if firewall_policy else False - - return (changed, { - 'id': firewall_policy['id'], - 'name': firewall_policy['name'] - }) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - name=dict(type='str'), - firewall_policy=dict(type='str'), - description=dict(type='str'), - rules=dict(type='list', elements="dict", default=[]), - add_server_ips=dict(type='list', elements="str", default=[]), - remove_server_ips=dict(type='list', elements="str", default=[]), - add_rules=dict(type='list', elements="dict", default=[]), - remove_rules=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='The "auth_token" parameter or ' + - 'ONEANDONE_AUTH_TOKEN environment variable is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - 
msg="'name' parameter is required to delete a firewall policy.") - try: - (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == 'update': - if not module.params.get('firewall_policy'): - module.fail_json( - msg="'firewall_policy' parameter is required to update a firewall policy.") - try: - (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == 'present': - for param in ('name', 'rules'): - if not module.params.get(param): - module.fail_json( - msg="%s parameter is required for new firewall policies." % param) - try: - (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, firewall_policy=firewall_policy) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py deleted file mode 100644 index 5f541a87..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_load_balancer -short_description: Configure 1&1 load balancer. -description: - - Create, remove, update load balancers. - This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a load balancer state to create, remove, or update. - type: str - required: false - default: 'present' - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - load_balancer: - description: - - The identifier (id or name) of the load balancer used with update state. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - name: - description: - - Load balancer name used with present state. Used as identifier (id or name) when used with absent state. - maxLength=128 - type: str - health_check_test: - description: - - Type of the health check. At the moment, HTTP is not allowed. - type: str - choices: [ "NONE", "TCP", "HTTP", "ICMP" ] - health_check_interval: - description: - - Health check period in seconds. minimum=5, maximum=300, multipleOf=1 - type: str - health_check_path: - description: - - Url to call for checking. Required for HTTP health check. maxLength=1000 - type: str - required: false - health_check_parse: - description: - - Regular expression to check. 
Required for HTTP health check. maxLength=64 - type: str - required: false - persistence: - description: - Persistence. - type: bool - persistence_time: - description: - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1 - type: str - method: - description: - Balancing procedure. - type: str - choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ] - datacenter: - description: - ID or country code of the datacenter where the load balancer will be created. - - If not specified, it defaults to I(US). - type: str - choices: [ "US", "ES", "DE", "GB" ] - required: false - rules: - description: - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, - port_balancer, and port_server parameters, in addition to source parameter, which is optional. - type: list - elements: dict - description: - description: - Description of the load balancer. maxLength=256 - type: str - required: false - add_server_ips: - description: - A list of server identifiers (id or name) to be assigned to a load balancer. - Used in combination with update state. - type: list - elements: str - required: false - add_rules: - description: - A list of rules that will be added to an existing load balancer. - Its syntax is the same as the one used for the rules parameter. Used in combination with update state. - type: list - elements: dict - required: false - remove_server_ips: - description: - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state. - type: list - elements: str - required: false - remove_rules: - description: - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state. - type: list - elements: str - required: false - wait: - description: - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -''' - -EXAMPLES = ''' -- name: Create a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - name: ansible load balancer - description: Testing creation of load balancer with ansible - health_check_test: TCP - health_check_interval: 40 - persistence: true - persistence_time: 1200 - method: ROUND_ROBIN - datacenter: US - rules: - - - protocol: TCP - port_balancer: 80 - port_server: 80 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - -- name: Destroy a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - name: ansible load balancer - wait: true - wait_timeout: 500 - state: absent - -- name: Update a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer - name: ansible load balancer updated - description: Testing the update of a load balancer with ansible - wait: true - wait_timeout: 500 - state: update - -- name: Add server to a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Adding server to a load balancer with ansible -
add_server_ips: - - server identifier (id or name) - wait: true - wait_timeout: 500 - state: update - -- name: Remove server from a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Removing server from a load balancer with ansible - remove_server_ips: - - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) - wait: true - wait_timeout: 500 - state: update - -- name: Add rules to a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Adding rules to a load balancer with ansible - add_rules: - - - protocol: TCP - port_balancer: 70 - port_server: 70 - source: 0.0.0.0 - - - protocol: TCP - port_balancer: 60 - port_server: 60 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - state: update - -- name: Remove rules from a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Adding rules to a load balancer with ansible - remove_rules: - - rule_id #1 - - rule_id #2 - - ... - wait: true - wait_timeout: 500 - state: update -''' - -RETURN = ''' -load_balancer: - description: Information about the load balancer that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_load_balancer, - get_server, - get_datacenter, - OneAndOneResources, - wait_for_resource_creation_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ['US', 'ES', 'DE', 'GB'] -HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP'] -METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS'] - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): - """ - Assigns servers to a load balancer. - """ - try: - attach_servers = [] - - for server_id in server_ids: - server = get_server(oneandone_conn, server_id, True) - attach_server = oneandone.client.AttachServer( - server_id=server['id'], - server_ip_id=next(iter(server['ips'] or []), None)['id'] - ) - attach_servers.append(attach_server) - - if module.check_mode: - if attach_servers: - return True - return False - - load_balancer = oneandone_conn.attach_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ips=attach_servers) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id): - """ - Unassigns a server/IP from a load balancer. - """ - try: - if module.check_mode: - lb_server = oneandone_conn.get_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ip_id=server_ip_id) - if lb_server: - return True - return False - - load_balancer = oneandone_conn.remove_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ip_id=server_ip_id) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): - """ - Adds new rules to a load_balancer. 
- """ - try: - load_balancer_rules = [] - - for rule in rules: - load_balancer_rule = oneandone.client.LoadBalancerRule( - protocol=rule['protocol'], - port_balancer=rule['port_balancer'], - port_server=rule['port_server'], - source=rule['source']) - load_balancer_rules.append(load_balancer_rule) - - if module.check_mode: - lb_id = get_load_balancer(oneandone_conn, load_balancer_id) - if (load_balancer_rules and lb_id): - return True - return False - - load_balancer = oneandone_conn.add_load_balancer_rule( - load_balancer_id=load_balancer_id, - load_balancer_rules=load_balancer_rules - ) - - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id): - """ - Removes a rule from a load_balancer. - """ - try: - if module.check_mode: - rule = oneandone_conn.get_load_balancer_rule( - load_balancer_id=load_balancer_id, - rule_id=rule_id) - if rule: - return True - return False - - load_balancer = oneandone_conn.remove_load_balancer_rule( - load_balancer_id=load_balancer_id, - rule_id=rule_id - ) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def update_load_balancer(module, oneandone_conn): - """ - Updates a load_balancer based on input arguments. - Load balancer rules and server ips can be added/removed to/from - load balancer. Load balancer name, description, health_check_test, - health_check_interval, persistence, persistence_time, and method - can be updated as well. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - load_balancer_id = module.params.get('load_balancer') - name = module.params.get('name') - description = module.params.get('description') - health_check_test = module.params.get('health_check_test') - health_check_interval = module.params.get('health_check_interval') - health_check_path = module.params.get('health_check_path') - health_check_parse = module.params.get('health_check_parse') - persistence = module.params.get('persistence') - persistence_time = module.params.get('persistence_time') - method = module.params.get('method') - add_server_ips = module.params.get('add_server_ips') - remove_server_ips = module.params.get('remove_server_ips') - add_rules = module.params.get('add_rules') - remove_rules = module.params.get('remove_rules') - - changed = False - - load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True) - if load_balancer is None: - _check_mode(module, False) - - if (name or description or health_check_test or health_check_interval or health_check_path or - health_check_parse or persistence or persistence_time or method): - _check_mode(module, True) - load_balancer = oneandone_conn.modify_load_balancer( - load_balancer_id=load_balancer['id'], - name=name, - description=description, - health_check_test=health_check_test, - health_check_interval=health_check_interval, - health_check_path=health_check_path, - health_check_parse=health_check_parse, - persistence=persistence, - persistence_time=persistence_time, - method=method) - changed = True - - if add_server_ips: - if module.check_mode: - _check_mode(module, _add_server_ips(module, - oneandone_conn, - load_balancer['id'], - add_server_ips)) - - load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips) - changed = True - - if remove_server_ips: - chk_changed = False - for server_ip_id in remove_server_ips: - if module.check_mode: - chk_changed |= 
_remove_load_balancer_server(module, - oneandone_conn, - load_balancer['id'], - server_ip_id) - - _remove_load_balancer_server(module, - oneandone_conn, - load_balancer['id'], - server_ip_id) - _check_mode(module, chk_changed) - load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) - changed = True - - if add_rules: - load_balancer = _add_load_balancer_rules(module, - oneandone_conn, - load_balancer['id'], - add_rules) - _check_mode(module, load_balancer) - changed = True - - if remove_rules: - chk_changed = False - for rule_id in remove_rules: - if module.check_mode: - chk_changed |= _remove_load_balancer_rule(module, - oneandone_conn, - load_balancer['id'], - rule_id) - - _remove_load_balancer_rule(module, - oneandone_conn, - load_balancer['id'], - rule_id) - _check_mode(module, chk_changed) - load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) - changed = True - - try: - return (changed, load_balancer) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def create_load_balancer(module, oneandone_conn): - """ - Create a new load_balancer. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get('name') - description = module.params.get('description') - health_check_test = module.params.get('health_check_test') - health_check_interval = module.params.get('health_check_interval') - health_check_path = module.params.get('health_check_path') - health_check_parse = module.params.get('health_check_parse') - persistence = module.params.get('persistence') - persistence_time = module.params.get('persistence_time') - method = module.params.get('method') - datacenter = module.params.get('datacenter') - rules = module.params.get('rules') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - load_balancer_rules = [] - - datacenter_id = None - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - module.fail_json( - msg='datacenter %s not found.' % datacenter) - - for rule in rules: - load_balancer_rule = oneandone.client.LoadBalancerRule( - protocol=rule['protocol'], - port_balancer=rule['port_balancer'], - port_server=rule['port_server'], - source=rule['source']) - load_balancer_rules.append(load_balancer_rule) - - _check_mode(module, True) - load_balancer_obj = oneandone.client.LoadBalancer( - health_check_path=health_check_path, - health_check_parse=health_check_parse, - name=name, - description=description, - health_check_test=health_check_test, - health_check_interval=health_check_interval, - persistence=persistence, - persistence_time=persistence_time, - method=method, - datacenter_id=datacenter_id - ) - - load_balancer = oneandone_conn.create_load_balancer( - load_balancer=load_balancer_obj, - load_balancer_rules=load_balancer_rules - ) - - if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.load_balancer, - load_balancer['id'], - wait_timeout, - wait_interval) - - load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh - changed = True if load_balancer else False - - _check_mode(module, False) - - return (changed, load_balancer) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_load_balancer(module, oneandone_conn): - """ - Removes a load_balancer. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - lb_id = module.params.get('name') - load_balancer_id = get_load_balancer(oneandone_conn, lb_id) - if module.check_mode: - if load_balancer_id is None: - _check_mode(module, False) - _check_mode(module, True) - load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id) - - changed = True if load_balancer else False - - return (changed, { - 'id': load_balancer['id'], - 'name': load_balancer['name'] - }) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - load_balancer=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - health_check_test=dict( - choices=HEALTH_CHECK_TESTS), - health_check_interval=dict(type='str'), - health_check_path=dict(type='str'), - health_check_parse=dict(type='str'), - persistence=dict(type='bool'), - persistence_time=dict(type='str'), - method=dict( - choices=METHODS), - datacenter=dict( - choices=DATACENTERS), - rules=dict(type='list', elements="dict", default=[]), - add_server_ips=dict(type='list', elements="str", default=[]), - remove_server_ips=dict(type='list', elements="str", default=[]), - add_rules=dict(type='list', elements="dict", default=[]), - remove_rules=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for deleting a load balancer.") - try: - (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - elif state == 'update': - if not module.params.get('load_balancer'): - module.fail_json( - msg="'load_balancer' parameter is required for updating a load balancer.") - try: - (changed, load_balancer) = update_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == 'present': - for param in ('name', 'health_check_test', 'health_check_interval', 'persistence', - 'persistence_time', 'method', 'rules'): - if not module.params.get(param): - module.fail_json( - msg="%s parameter is required for new load balancers." 
% param) - try: - (changed, load_balancer) = create_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, load_balancer=load_balancer) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py deleted file mode 100644 index 28dd0d41..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py +++ /dev/null @@ -1,1038 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_monitoring_policy -short_description: Configure 1&1 monitoring policy. -description: - - Create, remove, update monitoring policies - (and add/remove ports, processes, and servers). - This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a monitoring policy's state to create, remove, update. - type: str - required: false - default: present - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - name: - description: - - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128 - type: str - monitoring_policy: - description: - - The identifier (id or name) of the monitoring policy used with update state. - type: str - agent: - description: - - Set true for using agent. - type: str - email: - description: - - User's email. maxLength=128 - type: str - description: - description: - - Monitoring policy description. maxLength=256 - type: str - required: false - thresholds: - description: - - Monitoring policy thresholds. Each of the suboptions have warning and critical, - which both have alert and value suboptions. Warning is used to set limits for - warning alerts, critical is used to set critical alerts. alert enables alert, - and value is used to advise when the value is exceeded. - type: list - elements: dict - suboptions: - cpu: - description: - - Consumption limits of CPU. - required: true - ram: - description: - - Consumption limits of RAM. - required: true - disk: - description: - - Consumption limits of hard disk. - required: true - internal_ping: - description: - - Response limits of internal ping. - required: true - transfer: - description: - - Consumption limits for transfer. - required: true - ports: - description: - - Array of ports that will be monitoring. 
- type: list - elements: dict - suboptions: - protocol: - description: - - Internet protocol. - choices: [ "TCP", "UDP" ] - required: true - port: - description: - - Port number. minimum=1, maximum=65535 - required: true - alert_if: - description: - - Case of alert. - choices: [ "RESPONDING", "NOT_RESPONDING" ] - required: true - email_notification: - description: - - Set true for sending e-mail notifications. - required: true - processes: - description: - - Array of processes that will be monitoring. - type: list - elements: dict - suboptions: - process: - description: - - Name of the process. maxLength=50 - required: true - alert_if: - description: - - Case of alert. - choices: [ "RUNNING", "NOT_RUNNING" ] - required: true - add_ports: - description: - - Ports to add to the monitoring policy. - type: list - elements: dict - required: false - add_processes: - description: - - Processes to add to the monitoring policy. - type: list - elements: dict - required: false - add_servers: - description: - - Servers to add to the monitoring policy. - type: list - elements: str - required: false - remove_ports: - description: - - Ports to remove from the monitoring policy. - type: list - elements: str - required: false - remove_processes: - description: - - Processes to remove from the monitoring policy. - type: list - elements: str - required: false - remove_servers: - description: - - Servers to remove from the monitoring policy. - type: list - elements: str - required: false - update_ports: - description: - - Ports to be updated on the monitoring policy. - type: list - elements: dict - required: false - update_processes: - description: - - Processes to be updated on the monitoring policy. - type: list - elements: dict - required: false - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -''' - -EXAMPLES = ''' -- name: Create a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - name: ansible monitoring policy - description: Testing creation of a monitoring policy with ansible - email: your@emailaddress.com - agent: true - thresholds: - - - cpu: - warning: - value: 80 - alert: false - critical: - value: 92 - alert: false - - - ram: - warning: - value: 80 - alert: false - critical: - value: 90 - alert: false - - - disk: - warning: - value: 80 - alert: false - critical: - value: 90 - alert: false - - - internal_ping: - warning: - value: 50 - alert: false - critical: - value: 100 - alert: false - - - transfer: - warning: - value: 1000 - alert: false - critical: - value: 2000 - alert: false - ports: - - - protocol: TCP - port: 22 - alert_if: RESPONDING - email_notification: false - processes: - - - process: test - alert_if: NOT_RUNNING - email_notification: false - wait: true - -- name: Destroy a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - state: absent - name: ansible monitoring policy - -- name: Update a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: 
ansible monitoring policy - name: ansible monitoring policy updated - description: Testing creation of a monitoring policy with ansible updated - email: another@emailaddress.com - thresholds: - - - cpu: - warning: - value: 70 - alert: false - critical: - value: 90 - alert: false - - - ram: - warning: - value: 70 - alert: false - critical: - value: 80 - alert: false - - - disk: - warning: - value: 70 - alert: false - critical: - value: 80 - alert: false - - - internal_ping: - warning: - value: 60 - alert: false - critical: - value: 90 - alert: false - - - transfer: - warning: - value: 900 - alert: false - critical: - value: 1900 - alert: false - wait: true - state: update - -- name: Add a port to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - add_ports: - - - protocol: TCP - port: 33 - alert_if: RESPONDING - email_notification: false - wait: true - state: update - -- name: Update existing ports of a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - update_ports: - - - id: existing_port_id - protocol: TCP - port: 34 - alert_if: RESPONDING - email_notification: false - - - id: existing_port_id - protocol: TCP - port: 23 - alert_if: RESPONDING - email_notification: false - wait: true - state: update - -- name: Remove a port from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_ports: - - port_id - state: update - -- name: Add a process to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - add_processes: - - - process: test_2 - alert_if: NOT_RUNNING - email_notification: false - wait: true - state: update - -- name: Update existing processes of a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - update_processes: - - - id: process_id - process: test_1 - alert_if: NOT_RUNNING - email_notification: false - - - id: process_id - process: test_3 - alert_if: NOT_RUNNING - email_notification: false - wait: true - state: update - -- name: Remove a process from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_processes: - - process_id - wait: true - state: update - -- name: Add server to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - add_servers: - - server id or name - wait: true - state: update - -- name: Remove server from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_servers: - - server01 - wait: true - state: update -''' - -RETURN = ''' -monitoring_policy: - description: Information about the monitoring policy that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from 
ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_monitoring_policy, - get_server, - OneAndOneResources, - wait_for_resource_creation_completion -) - - HAS_ONEANDONE_SDK = True - - try: - import oneandone.client - except ImportError: - HAS_ONEANDONE_SDK = False - - - def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - - def _add_ports(module, oneandone_conn, monitoring_policy_id, ports): - """ - Adds new ports to a monitoring policy. - """ - try: - monitoring_policy_ports = [] - - for _port in ports: - monitoring_policy_port = oneandone.client.Port( - protocol=_port['protocol'], - port=_port['port'], - alert_if=_port['alert_if'], - email_notification=_port['email_notification'] - ) - monitoring_policy_ports.append(monitoring_policy_port) - - if module.check_mode: - if monitoring_policy_ports: - return True - return False - - monitoring_policy = oneandone_conn.add_port( - monitoring_policy_id=monitoring_policy_id, - ports=monitoring_policy_ports) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - - def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id): - """ - Removes a port from a monitoring policy. - """ - try: - if module.check_mode: - # Check mode must not perform the deletion; only probe for the port, - # mirroring the read-only lookup used in _modify_port below. - mp_port = oneandone_conn.get_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) - if mp_port: - return True - return False - - monitoring_policy = oneandone_conn.delete_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - - def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port): - """ - Modifies a monitoring policy port. - """ - try: - if module.check_mode: - cm_port = oneandone_conn.get_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) - if cm_port: - return True - return False - - monitoring_policy_port = oneandone.client.Port( - protocol=port['protocol'], - port=port['port'], - alert_if=port['alert_if'], - email_notification=port['email_notification'] - ) - - monitoring_policy = oneandone_conn.modify_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id, - port=monitoring_policy_port) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - - def _add_processes(module, oneandone_conn, monitoring_policy_id, processes): - """ - Adds new processes to a monitoring policy. - """ - try: - monitoring_policy_processes = [] - - for _process in processes: - monitoring_policy_process = oneandone.client.Process( - process=_process['process'], - alert_if=_process['alert_if'], - email_notification=_process['email_notification'] - ) - monitoring_policy_processes.append(monitoring_policy_process) - - if module.check_mode: - mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id) - if (monitoring_policy_processes and mp_id): - return True - return False - - monitoring_policy = oneandone_conn.add_process( - monitoring_policy_id=monitoring_policy_id, - processes=monitoring_policy_processes) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - - def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id): - """ - Removes a process from a monitoring policy.
- """ - try: - if module.check_mode: - process = oneandone_conn.get_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id - ) - if process: - return True - return False - - monitoring_policy = oneandone_conn.delete_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process): - """ - Modifies a monitoring policy process. - """ - try: - if module.check_mode: - cm_process = oneandone_conn.get_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id) - if cm_process: - return True - return False - - monitoring_policy_process = oneandone.client.Process( - process=process['process'], - alert_if=process['alert_if'], - email_notification=process['email_notification'] - ) - - monitoring_policy = oneandone_conn.modify_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id, - process=monitoring_policy_process) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers): - """ - Attaches servers to a monitoring policy. - """ - try: - attach_servers = [] - - for _server_id in servers: - server_id = get_server(oneandone_conn, _server_id) - attach_server = oneandone.client.AttachServer( - server_id=server_id - ) - attach_servers.append(attach_server) - - if module.check_mode: - if attach_servers: - return True - return False - - monitoring_policy = oneandone_conn.attach_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - servers=attach_servers) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id): - """ - Detaches a server from a monitoring policy. - """ - try: - if module.check_mode: - mp_server = oneandone_conn.get_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - server_id=server_id) - if mp_server: - return True - return False - - monitoring_policy = oneandone_conn.detach_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - server_id=server_id) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def update_monitoring_policy(module, oneandone_conn): - """ - Updates a monitoring_policy based on input arguments. - Monitoring policy ports, processes and servers can be added/removed to/from - a monitoring policy. Monitoring policy name, description, email, - thresholds for cpu, ram, disk, transfer and internal_ping - can be updated as well. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - monitoring_policy_id = module.params.get('monitoring_policy') - name = module.params.get('name') - description = module.params.get('description') - email = module.params.get('email') - thresholds = module.params.get('thresholds') - add_ports = module.params.get('add_ports') - update_ports = module.params.get('update_ports') - remove_ports = module.params.get('remove_ports') - add_processes = module.params.get('add_processes') - update_processes = module.params.get('update_processes') - remove_processes = module.params.get('remove_processes') - add_servers = module.params.get('add_servers') - remove_servers = module.params.get('remove_servers') - - changed = False - - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True) - if monitoring_policy is None: - _check_mode(module, False) - - _monitoring_policy = oneandone.client.MonitoringPolicy( - name=name, - description=description, - email=email - ) - - _thresholds = None - - if thresholds: - threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] - - _thresholds = [] - for threshold in thresholds: - key = list(threshold.keys())[0] - if key in threshold_entities: - _threshold = oneandone.client.Threshold( - entity=key, - warning_value=threshold[key]['warning']['value'], - warning_alert=str(threshold[key]['warning']['alert']).lower(), - critical_value=threshold[key]['critical']['value'], - critical_alert=str(threshold[key]['critical']['alert']).lower()) - _thresholds.append(_threshold) - - if name or description or email or thresholds: - _check_mode(module, True) - monitoring_policy = oneandone_conn.modify_monitoring_policy( - monitoring_policy_id=monitoring_policy['id'], - monitoring_policy=_monitoring_policy, - thresholds=_thresholds) - changed = True - - if add_ports: - if module.check_mode: - _check_mode(module, _add_ports(module, - oneandone_conn, - monitoring_policy['id'], - add_ports)) - - monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports) - changed = True - - if update_ports: - chk_changed = False - for update_port in update_ports: - if module.check_mode: - chk_changed |= _modify_port(module, - oneandone_conn, - monitoring_policy['id'], - update_port['id'], - update_port) - - _modify_port(module, - oneandone_conn, - monitoring_policy['id'], - update_port['id'], - update_port) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - if remove_ports: - chk_changed = False - for port_id in remove_ports: - if module.check_mode: - chk_changed |= _delete_monitoring_policy_port(module, - oneandone_conn, - monitoring_policy['id'], - port_id) - - _delete_monitoring_policy_port(module, - oneandone_conn, - monitoring_policy['id'], - port_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - if add_processes: - monitoring_policy = _add_processes(module, - oneandone_conn, - monitoring_policy['id'], - add_processes) - _check_mode(module, monitoring_policy) - changed = True - - if update_processes: - chk_changed = False - for update_process in update_processes: - if module.check_mode: - chk_changed |= _modify_process(module, - oneandone_conn, - monitoring_policy['id'], - update_process['id'], - update_process) - - _modify_process(module, - oneandone_conn, - monitoring_policy['id'], - update_process['id'], - 
update_process) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - if remove_processes: - chk_changed = False - for process_id in remove_processes: - if module.check_mode: - chk_changed |= _delete_monitoring_policy_process(module, - oneandone_conn, - monitoring_policy['id'], - process_id) - - _delete_monitoring_policy_process(module, - oneandone_conn, - monitoring_policy['id'], - process_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - if add_servers: - monitoring_policy = _attach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - add_servers) - _check_mode(module, monitoring_policy) - changed = True - - if remove_servers: - chk_changed = False - for _server_id in remove_servers: - server_id = get_server(oneandone_conn, _server_id) - - if module.check_mode: - chk_changed |= _detach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - server_id) - - _detach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - server_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) - changed = True - - return (changed, monitoring_policy) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def create_monitoring_policy(module, oneandone_conn): - """ - Creates a new monitoring policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get('name') - description = module.params.get('description') - email = module.params.get('email') - agent = module.params.get('agent') - thresholds = module.params.get('thresholds') - ports = module.params.get('ports') - processes = module.params.get('processes') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - _monitoring_policy = oneandone.client.MonitoringPolicy(name, - description, - email, - agent, ) - - _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower() - - threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] - - _thresholds = [] - for threshold in thresholds: - key = list(threshold.keys())[0] - if key in threshold_entities: - _threshold = oneandone.client.Threshold( - entity=key, - warning_value=threshold[key]['warning']['value'], - warning_alert=str(threshold[key]['warning']['alert']).lower(), - critical_value=threshold[key]['critical']['value'], - critical_alert=str(threshold[key]['critical']['alert']).lower()) - _thresholds.append(_threshold) - - _ports = [] - for port in ports: - _port = oneandone.client.Port( - protocol=port['protocol'], - port=port['port'], - alert_if=port['alert_if'], - email_notification=str(port['email_notification']).lower()) - _ports.append(_port) - - _processes = [] - for process in processes: - _process = oneandone.client.Process( - process=process['process'], - alert_if=process['alert_if'], - email_notification=str(process['email_notification']).lower()) - _processes.append(_process) - - _check_mode(module, True) - monitoring_policy = oneandone_conn.create_monitoring_policy( - monitoring_policy=_monitoring_policy, - thresholds=_thresholds, - ports=_ports, - processes=_processes - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - 
OneAndOneResources.monitoring_policy, - monitoring_policy['id'], - wait_timeout, - wait_interval) - - changed = True if monitoring_policy else False - - _check_mode(module, False) - - return (changed, monitoring_policy) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_monitoring_policy(module, oneandone_conn): - """ - Removes a monitoring policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - mp_id = module.params.get('name') - monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id) - if module.check_mode: - if monitoring_policy_id is None: - _check_mode(module, False) - _check_mode(module, True) - monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id) - - changed = True if monitoring_policy else False - - return (changed, { - 'id': monitoring_policy['id'], - 'name': monitoring_policy['name'] - }) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - name=dict(type='str'), - monitoring_policy=dict(type='str'), - agent=dict(type='str'), - email=dict(type='str'), - description=dict(type='str'), - thresholds=dict(type='list', elements="dict", default=[]), - ports=dict(type='list', elements="dict", default=[]), - processes=dict(type='list', elements="dict", default=[]), - add_ports=dict(type='list', elements="dict", default=[]), - update_ports=dict(type='list', elements="dict", default=[]), - remove_ports=dict(type='list', elements="str", default=[]), - add_processes=dict(type='list', elements="dict", default=[]), - update_processes=dict(type='list', elements="dict", default=[]), - remove_processes=dict(type='list', elements="str", default=[]), - add_servers=dict(type='list', elements="str", default=[]), - remove_servers=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required to delete a monitoring policy.") - try: - (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - elif state == 'update': - if not module.params.get('monitoring_policy'): - module.fail_json( - msg="'monitoring_policy' parameter is required to update a monitoring policy.") - try: - (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == 'present': - for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'): 
- if not module.params.get(param): - module.fail_json( - msg="%s parameter is required for a new monitoring policy." % param) - try: - (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, monitoring_policy=monitoring_policy) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py deleted file mode 100644 index 6a16cf68..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py +++ /dev/null @@ -1,457 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_private_network -short_description: Configure 1&1 private networking. -description: - - Create, remove, reconfigure, update a private network. - This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a network's state to create, remove, or update. - type: str - required: false - default: 'present' - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - private_network: - description: - - The identifier (id or name) of the network used with update state. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - name: - description: - - Private network name used with present state. Used as identifier (id or name) when used with absent state. - type: str - description: - description: - - Set a description for the network. - type: str - datacenter: - description: - - The identifier of the datacenter where the private network will be created - type: str - choices: [US, ES, DE, GB] - network_address: - description: - - Set a private network space, i.e. 192.168.1.0 - type: str - subnet_mask: - description: - - Set the netmask for the private network, i.e. 255.255.255.0 - type: str - add_members: - description: - - List of server identifiers (name or id) to be added to the private network. - type: list - elements: str - remove_members: - description: - - List of server identifiers (name or id) to be removed from the private network. 
- type: list - elements: str - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -''' - -EXAMPLES = ''' -- name: Create a private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - name: backup_network - description: Testing creation of a private network with ansible - network_address: 70.35.193.100 - subnet_mask: 255.0.0.0 - datacenter: US - -- name: Destroy a private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: absent - name: backup_network - -- name: Modify the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update - private_network: backup_network - network_address: 192.168.2.0 - subnet_mask: 255.255.255.0 - -- name: Add members to the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update - private_network: backup_network - add_members: - - server identifier (id or name) - -- name: Remove members from the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update - private_network: backup_network - remove_members: - - server identifier (id or name) -''' - -RETURN = ''' -private_network: - description: Information about the private network. 
- type: dict - sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_private_network, - get_server, - get_datacenter, - OneAndOneResources, - wait_for_resource_creation_completion, - wait_for_resource_deletion_completion -) - - HAS_ONEANDONE_SDK = True - - try: - import oneandone.client - except ImportError: - HAS_ONEANDONE_SDK = False - - DATACENTERS = ['US', 'ES', 'DE', 'GB'] - - - def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - - def _add_servers(module, oneandone_conn, name, members): - try: - private_network_id = get_private_network(oneandone_conn, name) - - if module.check_mode: - if private_network_id and members: - return True - return False - - network = oneandone_conn.attach_private_network_servers( - private_network_id=private_network_id, - server_ids=members) - - return network - except Exception as e: - module.fail_json(msg=str(e)) - - - def _remove_member(module, oneandone_conn, name, member_id): - try: - private_network_id = get_private_network(oneandone_conn, name) - - if module.check_mode: - if private_network_id: - network_member = oneandone_conn.get_private_network_server( - private_network_id=private_network_id, - server_id=member_id) - if network_member: - return True - return False - - # Pass the resolved network id, not the raw identifier the caller - # supplied, which may be a name the API would not accept here. - network = oneandone_conn.remove_private_network_server( - private_network_id=private_network_id, - server_id=member_id) - - return network - except Exception as ex: - module.fail_json(msg=str(ex)) - - - def create_network(module, oneandone_conn): - """ - Create new private network - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any network was added. - """ - name = module.params.get('name') - description = module.params.get('description') - network_address = module.params.get('network_address') - subnet_mask = module.params.get('subnet_mask') - datacenter = module.params.get('datacenter') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - # Initialize so the reference below cannot hit an unbound local when - # no datacenter is given (matches the load balancer module). - datacenter_id = None - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - module.fail_json( - msg='datacenter %s not found.' % datacenter) - - try: - _check_mode(module, True) - network = oneandone_conn.create_private_network( - private_network=oneandone.client.PrivateNetwork( - name=name, - description=description, - network_address=network_address, - subnet_mask=subnet_mask, - datacenter_id=datacenter_id - )) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.private_network, - network['id'], - wait_timeout, - wait_interval) - network = get_private_network(oneandone_conn, - network['id'], - True) - - changed = True if network else False - - _check_mode(module, False) - - return (changed, network) - except Exception as e: - module.fail_json(msg=str(e)) - - - def update_network(module, oneandone_conn): - """ - Modifies a private network.
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - _private_network_id = module.params.get('private_network') - _name = module.params.get('name') - _description = module.params.get('description') - _network_address = module.params.get('network_address') - _subnet_mask = module.params.get('subnet_mask') - _add_members = module.params.get('add_members') - _remove_members = module.params.get('remove_members') - - changed = False - - private_network = get_private_network(oneandone_conn, - _private_network_id, - True) - if private_network is None: - _check_mode(module, False) - - if _name or _description or _network_address or _subnet_mask: - _check_mode(module, True) - private_network = oneandone_conn.modify_private_network( - private_network_id=private_network['id'], - name=_name, - description=_description, - network_address=_network_address, - subnet_mask=_subnet_mask) - changed = True - - if _add_members: - instances = [] - - for member in _add_members: - instance_id = get_server(oneandone_conn, member) - instance_obj = oneandone.client.AttachServer(server_id=instance_id) - - instances.extend([instance_obj]) - private_network = _add_servers(module, oneandone_conn, private_network['id'], instances) - _check_mode(module, private_network) - changed = True - - if _remove_members: - chk_changed = False - for member in _remove_members: - instance = get_server(oneandone_conn, member, True) - - if module.check_mode: - chk_changed |= _remove_member(module, - oneandone_conn, - private_network['id'], - instance['id']) - _check_mode(module, instance and chk_changed) - - _remove_member(module, - oneandone_conn, - private_network['id'], - instance['id']) - private_network = get_private_network(oneandone_conn, - private_network['id'], - True) - changed = True - - return (changed, private_network) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_network(module, oneandone_conn): - """ - Removes a private network. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. 
- """ - try: - pn_id = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - private_network_id = get_private_network(oneandone_conn, pn_id) - if module.check_mode: - if private_network_id is None: - _check_mode(module, False) - _check_mode(module, True) - private_network = oneandone_conn.delete_private_network(private_network_id) - wait_for_resource_deletion_completion(oneandone_conn, - OneAndOneResources.private_network, - private_network['id'], - wait_timeout, - wait_interval) - - changed = True if private_network else False - - return (changed, { - 'id': private_network['id'], - 'name': private_network['name'] - }) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - private_network=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - network_address=dict(type='str'), - subnet_mask=dict(type='str'), - add_members=dict(type='list', elements="str", default=[]), - remove_members=dict(type='list', elements="str", default=[]), - datacenter=dict( - choices=DATACENTERS), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for deleting a network.") - try: - (changed, private_network) = remove_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == 'update': - if not module.params.get('private_network'): - module.fail_json( - msg="'private_network' parameter is required for updating a network.") - try: - (changed, private_network) = update_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == 'present': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for new networks.") - try: - (changed, private_network) = create_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, private_network=private_network) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py deleted file mode 100644 index 96b1c9f3..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py +++ /dev/null @@ -1,342 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: 
you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_public_ip -short_description: Configure 1&1 public IPs. -description: - - Create, update, and remove public IPs. - This module has a dependency on 1and1 >= 1.0 -options: - state: - description: - - Define a public ip state to create, remove, or update. - type: str - required: false - default: 'present' - choices: [ "present", "absent", "update" ] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - required: false - reverse_dns: - description: - - Reverse DNS name. maxLength=256 - type: str - required: false - datacenter: - description: - - ID of the datacenter where the IP will be created (only for unassigned IPs). - type: str - choices: [US, ES, DE, GB] - default: US - required: false - type: - description: - - Type of IP. Currently, only IPV4 is available. - type: str - choices: ["IPV4", "IPV6"] - default: 'IPV4' - required: false - public_ip_id: - description: - - The ID of the public IP used with update and delete states. 
- type: str - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods - type: int - default: 5 - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -''' - -EXAMPLES = ''' -- name: Create a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - reverse_dns: example.com - datacenter: US - type: IPV4 - -- name: Update a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - public_ip_id: public ip id - reverse_dns: secondexample.com - state: update - -- name: Delete a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - public_ip_id: public ip id - state: absent -''' - -RETURN = ''' -public_ip: - description: Information about the public ip that was processed - type: dict - sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}' - returned: always -''' - -import os -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_datacenter, - get_public_ip, - OneAndOneResources, - wait_for_resource_creation_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ['US', 'ES', 'DE', 'GB'] - -TYPES = ['IPV4', 'IPV6'] - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def create_public_ip(module, oneandone_conn): - """ - Create new public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was added. - """ - reverse_dns = module.params.get('reverse_dns') - datacenter = module.params.get('datacenter') - ip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - _check_mode(module, False) - module.fail_json( - msg='datacenter %s not found.' % datacenter) - - try: - _check_mode(module, True) - public_ip = oneandone_conn.create_public_ip( - reverse_dns=reverse_dns, - ip_type=ip_type, - datacenter_id=datacenter_id) - - if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.public_ip, - public_ip['id'], - wait_timeout, - wait_interval) - public_ip = oneandone_conn.get_public_ip(public_ip['id']) - - changed = True if public_ip else False - - return (changed, public_ip) - except Exception as e: - module.fail_json(msg=str(e)) - - -def update_public_ip(module, oneandone_conn): - """ - Update a public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was changed. 
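    Note: under check mode this function exits early via _check_mode() and
    reports changed=True as soon as the public IP is found, before any
    modify_public_ip() call is made (see the body below).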
- """ - reverse_dns = module.params.get('reverse_dns') - public_ip_id = module.params.get('public_ip_id') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - public_ip = get_public_ip(oneandone_conn, public_ip_id, True) - if public_ip is None: - _check_mode(module, False) - module.fail_json( - msg='public IP %s not found.' % public_ip_id) - - try: - _check_mode(module, True) - public_ip = oneandone_conn.modify_public_ip( - ip_id=public_ip['id'], - reverse_dns=reverse_dns) - - if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.public_ip, - public_ip['id'], - wait_timeout, - wait_interval) - public_ip = oneandone_conn.get_public_ip(public_ip['id']) - - changed = True if public_ip else False - - return (changed, public_ip) - except Exception as e: - module.fail_json(msg=str(e)) - - -def delete_public_ip(module, oneandone_conn): - """ - Delete a public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was deleted. - """ - public_ip_id = module.params.get('public_ip_id') - - public_ip = get_public_ip(oneandone_conn, public_ip_id, True) - if public_ip is None: - _check_mode(module, False) - module.fail_json( - msg='public IP %s not found.' % public_ip_id) - - try: - _check_mode(module, True) - deleted_public_ip = oneandone_conn.delete_public_ip( - ip_id=public_ip['id']) - - changed = True if deleted_public_ip else False - - return (changed, { - 'id': public_ip['id'] - }) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - public_ip_id=dict(type='str'), - reverse_dns=dict(type='str'), - datacenter=dict( - choices=DATACENTERS, - default='US'), - type=dict( - choices=TYPES, - default='IPV4'), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), - ), - supports_check_mode=True - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('public_ip_id'): - module.fail_json( - msg="'public_ip_id' parameter is required to delete a public ip.") - try: - (changed, public_ip) = delete_public_ip(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == 'update': - if not module.params.get('public_ip_id'): - module.fail_json( - msg="'public_ip_id' parameter is required to update a public ip.") - try: - (changed, public_ip) = update_public_ip(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == 'present': - try: - (changed, public_ip) = create_public_ip(module, 
oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, public_ip=public_ip) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py deleted file mode 100644 index aa651bd7..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py +++ /dev/null @@ -1,707 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oneandone_server -short_description: Create, destroy, start, stop, and reboot a 1&1 Host server. -description: - - Create, destroy, update, start, stop, and reboot a 1&1 Host server. - When the server is created it can optionally wait for it to be 'running' before returning. -options: - state: - description: - - Define a server's state to create, remove, start or stop it. - type: str - default: present - choices: [ "present", "absent", "running", "stopped" ] - auth_token: - description: - - Authenticating API token provided by 1&1. Overrides the - ONEANDONE_AUTH_TOKEN environment variable. - type: str - api_url: - description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. - type: str - datacenter: - description: - - The datacenter location. - type: str - default: US - choices: [ "US", "ES", "DE", "GB" ] - hostname: - description: - - The hostname or ID of the server. Only used when state is 'present'. - type: str - description: - description: - - The description of the server. - type: str - appliance: - description: - - The operating system name or ID for the server. - It is required only for 'present' state. - type: str - fixed_instance_size: - description: - - The instance size name or ID of the server. - It is required only for 'present' state, and it is mutually exclusive with - vcore, cores_per_processor, ram, and hdds parameters. - - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)' - type: str - vcore: - description: - - The total number of processors. - It must be provided with cores_per_processor, ram, and hdds parameters. - type: int - cores_per_processor: - description: - - The number of cores per processor. - It must be provided with vcore, ram, and hdds parameters. - type: int - ram: - description: - - The amount of RAM memory. - It must be provided with with vcore, cores_per_processor, and hdds parameters. - type: float - hdds: - description: - - A list of hard disks with nested "size" and "is_main" properties. - It must be provided with vcore, cores_per_processor, and ram parameters. - type: list - elements: dict - private_network: - description: - - The private network name or ID. 
- type: str - firewall_policy: - description: - - The firewall policy name or ID. - type: str - load_balancer: - description: - - The load balancer name or ID. - type: str - monitoring_policy: - description: - - The monitoring policy name or ID. - type: str - server: - description: - - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'. - type: str - count: - description: - - The number of servers to create. - type: int - default: 1 - ssh_key: - description: - - User's public SSH key (contents, not path). - type: raw - server_type: - description: - - The type of server to be built. - type: str - default: "cloud" - choices: [ "cloud", "baremetal", "k8s_node" ] - wait: - description: - - Wait for the server to be in state 'running' before returning. - Also used for delete operation (set to 'false' if you don't want to wait - for each individual server to be deleted before moving on with - other tasks.) - type: bool - default: 'yes' - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the wait_for methods - type: int - default: 5 - auto_increment: - description: - - When creating multiple servers at once, whether to differentiate - hostnames by appending a count after them or substituting the count - where there is a %02d or %03d in the hostname string. - type: bool - default: 'yes' - -requirements: - - "1and1" - - "python >= 2.6" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" - -''' - -EXAMPLES = ''' -- name: Create three servers and enumerate their names - community.general.oneandone_server: - auth_token: oneandone_private_api_key - hostname: node%02d - fixed_instance_size: XL - datacenter: US - appliance: C5A349786169F140BCBC335675014C08 - auto_increment: true - count: 3 - -- name: Create three servers, passing in an ssh_key - community.general.oneandone_server: - auth_token: oneandone_private_api_key - hostname: node%02d - vcore: 2 - cores_per_processor: 4 - ram: 8.0 - hdds: - - size: 50 - is_main: false - datacenter: ES - appliance: C5A349786169F140BCBC335675014C08 - count: 3 - wait: yes - wait_timeout: 600 - wait_interval: 10 - ssh_key: SSH_PUBLIC_KEY - -- name: Removing server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: absent - server: 'node01' - -- name: Starting server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: running - server: 'node01' - -- name: Stopping server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: stopped - server: 'node01' -''' - -RETURN = ''' -servers: - description: Information about each server that was processed - type: list - sample: '[{"hostname": "my-server", "id": "server-id"}]' - returned: always -''' - -import os -import time -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - get_datacenter, - get_fixed_instance_size, - get_appliance, - get_private_network, - get_monitoring_policy, - get_firewall_policy, - get_load_balancer, - get_server, - OneAndOneResources, - wait_for_resource_creation_completion, - wait_for_resource_deletion_completion -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ['US', 'ES', 'DE', 
'GB'] - -ONEANDONE_SERVER_STATES = ( - 'DEPLOYING', - 'POWERED_OFF', - 'POWERED_ON', - 'POWERING_ON', - 'POWERING_OFF', -) - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json( - changed=result - ) - - -def _create_server(module, oneandone_conn, hostname, description, - fixed_instance_size_id, vcore, cores_per_processor, ram, - hdds, datacenter_id, appliance_id, ssh_key, - private_network_id, firewall_policy_id, load_balancer_id, - monitoring_policy_id, server_type, wait, wait_timeout, - wait_interval): - - try: - existing_server = get_server(oneandone_conn, hostname) - - if existing_server: - if module.check_mode: - return False - return None - - if module.check_mode: - return True - - server = oneandone_conn.create_server( - oneandone.client.Server( - name=hostname, - description=description, - fixed_instance_size_id=fixed_instance_size_id, - vcore=vcore, - cores_per_processor=cores_per_processor, - ram=ram, - appliance_id=appliance_id, - datacenter_id=datacenter_id, - rsa_key=ssh_key, - private_network_id=private_network_id, - firewall_policy_id=firewall_policy_id, - load_balancer_id=load_balancer_id, - monitoring_policy_id=monitoring_policy_id, - server_type=server_type,), hdds) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.server, - server['id'], - wait_timeout, - wait_interval) - server = oneandone_conn.get_server(server['id']) # refresh - - return server - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _insert_network_data(server): - for addr_data in server['ips']: - if addr_data['type'] == 'IPV6': - server['public_ipv6'] = addr_data['ip'] - elif addr_data['type'] == 'IPV4': - server['public_ipv4'] = addr_data['ip'] - return server - - -def create_server(module, oneandone_conn): - """ - Create new server - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any server was added, and a 'servers' attribute with the list of the - created servers' hostname, id and ip addresses. - """ - hostname = module.params.get('hostname') - description = module.params.get('description') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - fixed_instance_size = module.params.get('fixed_instance_size') - vcore = module.params.get('vcore') - cores_per_processor = module.params.get('cores_per_processor') - ram = module.params.get('ram') - hdds = module.params.get('hdds') - datacenter = module.params.get('datacenter') - appliance = module.params.get('appliance') - ssh_key = module.params.get('ssh_key') - private_network = module.params.get('private_network') - monitoring_policy = module.params.get('monitoring_policy') - firewall_policy = module.params.get('firewall_policy') - load_balancer = module.params.get('load_balancer') - server_type = module.params.get('server_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - _check_mode(module, False) - module.fail_json( - msg='datacenter %s not found.' 
% datacenter) - - fixed_instance_size_id = None - if fixed_instance_size: - fixed_instance_size_id = get_fixed_instance_size( - oneandone_conn, - fixed_instance_size) - if fixed_instance_size_id is None: - _check_mode(module, False) - module.fail_json( - msg='fixed_instance_size %s not found.' % fixed_instance_size) - - appliance_id = get_appliance(oneandone_conn, appliance) - if appliance_id is None: - _check_mode(module, False) - module.fail_json( - msg='appliance %s not found.' % appliance) - - private_network_id = None - if private_network: - private_network_id = get_private_network( - oneandone_conn, - private_network) - if private_network_id is None: - _check_mode(module, False) - module.fail_json( - msg='private network %s not found.' % private_network) - - monitoring_policy_id = None - if monitoring_policy: - monitoring_policy_id = get_monitoring_policy( - oneandone_conn, - monitoring_policy) - if monitoring_policy_id is None: - _check_mode(module, False) - module.fail_json( - msg='monitoring policy %s not found.' % monitoring_policy) - - firewall_policy_id = None - if firewall_policy: - firewall_policy_id = get_firewall_policy( - oneandone_conn, - firewall_policy) - if firewall_policy_id is None: - _check_mode(module, False) - module.fail_json( - msg='firewall policy %s not found.' % firewall_policy) - - load_balancer_id = None - if load_balancer: - load_balancer_id = get_load_balancer( - oneandone_conn, - load_balancer) - if load_balancer_id is None: - _check_mode(module, False) - module.fail_json( - msg='load balancer %s not found.' % load_balancer) - - if auto_increment: - hostnames = _auto_increment_hostname(count, hostname) - descriptions = _auto_increment_description(count, description) - else: - hostnames = [hostname] * count - descriptions = [description] * count - - hdd_objs = [] - if hdds: - for hdd in hdds: - hdd_objs.append(oneandone.client.Hdd( - size=hdd['size'], - is_main=hdd['is_main'] - )) - - servers = [] - for index, name in enumerate(hostnames): - server = _create_server( - module=module, - oneandone_conn=oneandone_conn, - hostname=name, - description=descriptions[index], - fixed_instance_size_id=fixed_instance_size_id, - vcore=vcore, - cores_per_processor=cores_per_processor, - ram=ram, - hdds=hdd_objs, - datacenter_id=datacenter_id, - appliance_id=appliance_id, - ssh_key=ssh_key, - private_network_id=private_network_id, - monitoring_policy_id=monitoring_policy_id, - firewall_policy_id=firewall_policy_id, - load_balancer_id=load_balancer_id, - server_type=server_type, - wait=wait, - wait_timeout=wait_timeout, - wait_interval=wait_interval) - if server: - servers.append(server) - - changed = False - - if servers: - for server in servers: - if server: - _check_mode(module, True) - _check_mode(module, False) - servers = [_insert_network_data(_server) for _server in servers] - changed = True - - _check_mode(module, False) - - return (changed, servers) - - -def remove_server(module, oneandone_conn): - """ - Removes a server. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. - - Returns a dictionary containing a 'changed' attribute indicating whether - the server was removed, and a 'removed_server' attribute with - the removed server's hostname and id. 
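    In check mode the function exits via _check_mode() with changed=True as
    soon as the server lookup succeeds; otherwise the delete is issued and,
    when 'wait' is true, the function blocks until the deletion completes
    or 'wait_timeout' expires.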
- """ - server_id = module.params.get('server') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - changed = False - removed_server = None - - server = get_server(oneandone_conn, server_id, True) - if server: - _check_mode(module, True) - try: - oneandone_conn.delete_server(server_id=server['id']) - if wait: - wait_for_resource_deletion_completion(oneandone_conn, - OneAndOneResources.server, - server['id'], - wait_timeout, - wait_interval) - changed = True - except Exception as ex: - module.fail_json( - msg="failed to terminate the server: %s" % str(ex)) - - removed_server = { - 'id': server['id'], - 'hostname': server['name'] - } - _check_mode(module, False) - - return (changed, removed_server) - - -def startstop_server(module, oneandone_conn): - """ - Starts or Stops a server. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. - - Returns a dictionary with a 'changed' attribute indicating whether - anything has changed for the server as a result of this function - being run, and a 'server' attribute with basic information for - the server. - """ - state = module.params.get('state') - server_id = module.params.get('server') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - changed = False - - # Resolve server - server = get_server(oneandone_conn, server_id, True) - if server: - # Attempt to change the server state, only if it's not already there - # or on its way. - try: - if state == 'stopped' and server['status']['state'] == 'POWERED_ON': - _check_mode(module, True) - oneandone_conn.modify_server_status( - server_id=server['id'], - action='POWER_OFF', - method='SOFTWARE') - elif state == 'running' and server['status']['state'] == 'POWERED_OFF': - _check_mode(module, True) - oneandone_conn.modify_server_status( - server_id=server['id'], - action='POWER_ON', - method='SOFTWARE') - except Exception as ex: - module.fail_json( - msg="failed to set server %s to state %s: %s" % ( - server_id, state, str(ex))) - - _check_mode(module, False) - - # Make sure the server has reached the desired state - if wait: - operation_completed = False - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(wait_interval) - server = oneandone_conn.get_server(server['id']) # refresh - server_state = server['status']['state'] - if state == 'stopped' and server_state == 'POWERED_OFF': - operation_completed = True - break - if state == 'running' and server_state == 'POWERED_ON': - operation_completed = True - break - if not operation_completed: - module.fail_json( - msg="Timeout waiting for server %s to get to state %s" % ( - server_id, state)) - - changed = True - server = _insert_network_data(server) - - _check_mode(module, False) - - return (changed, server) - - -def _auto_increment_hostname(count, hostname): - """ - Allow a custom incremental count in the hostname when defined with the - string formatting (%) operator. Otherwise, increment using name-01, - name-02, name-03, and so forth. - """ - if '%' not in hostname: - hostname = "%s-%%01d" % hostname - - return [ - hostname % i - for i in xrange(1, count + 1) - ] - - -def _auto_increment_description(count, description): - """ - Allow the incremental count in the description when defined with the - string formatting (%) operator. Otherwise, repeat the same description. 
- """ - if '%' in description: - return [ - description % i - for i in xrange(1, count + 1) - ] - else: - return [description] * count - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', - default=os.environ.get('ONEANDONE_AUTH_TOKEN'), - no_log=True), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - hostname=dict(type='str'), - description=dict(type='str'), - appliance=dict(type='str'), - fixed_instance_size=dict(type='str'), - vcore=dict(type='int'), - cores_per_processor=dict(type='int'), - ram=dict(type='float'), - hdds=dict(type='list', elements='dict'), - count=dict(type='int', default=1), - ssh_key=dict(type='raw', no_log=False), - auto_increment=dict(type='bool', default=True), - server=dict(type='str'), - datacenter=dict( - choices=DATACENTERS, - default='US'), - private_network=dict(type='str'), - firewall_policy=dict(type='str'), - load_balancer=dict(type='str'), - monitoring_policy=dict(type='str'), - server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']), - ), - supports_check_mode=True, - mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'], - ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],), - required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],) - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') - - if not module.params.get('auth_token'): - module.fail_json( - msg='The "auth_token" parameter or ' + - 'ONEANDONE_AUTH_TOKEN environment variable is required.') - - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('server'): - module.fail_json( - msg="'server' parameter is required for deleting a server.") - try: - (changed, servers) = remove_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state in ('running', 'stopped'): - if not module.params.get('server'): - module.fail_json( - msg="'server' parameter is required for starting/stopping a server.") - try: - (changed, servers) = startstop_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == 'present': - for param in ('hostname', - 'appliance', - 'datacenter'): - if not module.params.get(param): - module.fail_json( - msg="%s parameter is required for new server." 
% param) - try: - (changed, servers) = create_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, servers=servers) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py b/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py deleted file mode 100644 index cf218efd..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: online_server_info -short_description: Gather information about Online servers. -description: - - Gather information about the servers. - - U(https://www.online.net/en/dedicated-server) -author: - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.online - -''' - -EXAMPLES = r''' -- name: Gather Online server information - community.general.online_server_info: - api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' - register: result - -- ansible.builtin.debug: - msg: "{{ result.online_server_info }}" -''' - -RETURN = r''' -online_server_info: - description: - - Response from Online API. - - "For more details please refer to: U(https://console.online.net/en/api/)." - returned: success - type: list - elements: dict - sample: - "online_server_info": [ - { - "abuse": "abuse@example.com", - "anti_ddos": false, - "bmc": { - "session_key": null - }, - "boot_mode": "normal", - "contacts": { - "owner": "foobar", - "tech": "foobar" - }, - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "drive_arrays": [ - { - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "raid_controller": { - "$ref": "/api/v1/server/hardware/raidController/9910" - }, - "raid_level": "RAID1" - } - ], - "hardware_watch": true, - "hostname": "sd-42", - "id": 42, - "ip": [ - { - "address": "195.154.172.149", - "mac": "28:92:4a:33:5e:c6", - "reverse": "195-154-172-149.rev.poneytelecom.eu.", - "switch_port_state": "up", - "type": "public" - }, - { - "address": "10.90.53.212", - "mac": "28:92:4a:33:5e:c7", - "reverse": null, - "switch_port_state": "up", - "type": "private" - } - ], - "last_reboot": "2018-08-23T08:32:03.000Z", - "location": { - "block": "A", - "datacenter": "DC3", - "position": 19, - "rack": "A23", - "room": "4 4-4" - }, - "network": { - "ip": [ - "195.154.172.149" - ], - "ipfo": [], - "private": [ - "10.90.53.212" - ] - }, - "offer": "Pro-1-S-SATA", - "os": { - "name": "FreeBSD", - "version": "11.1-RELEASE" - }, - "power": "ON", - "proactive_monitoring": false, - "raid_controllers": [ - { - "$ref": "/api/v1/server/hardware/raidController/9910" - } - ], - "support": "Basic service level" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec -) - - -class OnlineServerInfo(Online): - - def __init__(self, module): - super(OnlineServerInfo, self).__init__(module) - self.name = 'api/v1/server' - - def _get_server_detail(self, server_path): - 
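        # Resolve a single server resource path with an extra GET: the pool
        # listing only returns per-server paths, so each entry is fetched
        # individually; any OnlineException aborts the run via fail_json.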
try: - return self.get(path=server_path).json - except OnlineException as exc: - self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc)) - - def all_detailed_servers(self): - servers_api_path = self.get_resources() - - server_data = ( - self._get_server_detail(server_api_path) - for server_api_path in servers_api_path - ) - - return [s for s in server_data if s is not None] - - -def main(): - module = AnsibleModule( - argument_spec=online_argument_spec(), - supports_check_mode=True, - ) - - try: - servers_info = OnlineServerInfo(module).all_detailed_servers() - module.exit_json( - online_server_info=servers_info - ) - except OnlineException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py b/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py deleted file mode 100644 index cd1b6dfa..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' -module: online_user_info -short_description: Gather information about Online user. -description: - - Gather information about the user. -author: - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.online -''' - -EXAMPLES = r''' -- name: Gather Online user info - community.general.online_user_info: - register: result - -- ansible.builtin.debug: - msg: "{{ result.online_user_info }}" -''' - -RETURN = r''' -online_user_info: - description: - - Response from Online API. - - "For more details please refer to: U(https://console.online.net/en/api/)." 
- returned: success - type: dict - sample: - "online_user_info": { - "company": "foobar LLC", - "email": "foobar@example.com", - "first_name": "foo", - "id": 42, - "last_name": "bar", - "login": "foobar" - } -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec -) - - -class OnlineUserInfo(Online): - - def __init__(self, module): - super(OnlineUserInfo, self).__init__(module) - self.name = 'api/v1/user' - - -def main(): - module = AnsibleModule( - argument_spec=online_argument_spec(), - supports_check_mode=True, - ) - - try: - module.exit_json( - online_user_info=OnlineUserInfo(module).get_resources() - ) - except OnlineException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py deleted file mode 100644 index f205a40a..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py +++ /dev/null @@ -1,285 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright 2018 www.privaz.io Valletech AB -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: one_host - -short_description: Manages OpenNebula Hosts - - -requirements: - - pyone - -description: - - "Manages OpenNebula Hosts" - -options: - name: - description: - - Hostname of the machine to manage. - required: true - type: str - state: - description: - - Takes the host to the desired lifecycle state. - - If C(absent) the host will be deleted from the cluster. - - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states). - - If C(enabled) the host is fully operational. - - C(disabled), e.g. to perform maintenance operations. - - C(offline), host is totally offline. - choices: - - absent - - present - - enabled - - disabled - - offline - default: present - type: str - im_mad_name: - description: - - The name of the information manager, this values are taken from the oned.conf with the tag name IM_MAD (name) - default: kvm - type: str - vmm_mad_name: - description: - - The name of the virtual machine manager mad name, this values are taken from the oned.conf with the tag name VM_MAD (name) - default: kvm - type: str - cluster_id: - description: - - The cluster ID. - default: 0 - type: int - cluster_name: - description: - - The cluster specified by name. - type: str - labels: - description: - - The labels for this host. - type: list - elements: str - template: - description: - - The template or attribute changes to merge into the host template. 
- aliases: - - attributes - type: dict - -extends_documentation_fragment: -- community.general.opennebula - - -author: - - Rafael del Valle (@rvalle) -''' - -EXAMPLES = ''' -- name: Create a new host in OpenNebula - community.general.one_host: - name: host1 - cluster_id: 1 - api_url: http://127.0.0.1:2633/RPC2 - -- name: Create a host and adjust its template - community.general.one_host: - name: host2 - cluster_name: default - template: - LABELS: - - gold - - ssd - RESERVED_CPU: -100 -''' - -# TODO: pending setting guidelines on returned values -RETURN = ''' -''' - -# TODO: Documentation on valid state transitions is required to properly implement all valid cases -# TODO: To be coherent with CLI this module should also provide "flush" functionality - -from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule - -try: - from pyone import HOST_STATES, HOST_STATUS -except ImportError: - pass # handled at module utils - - -# Pseudo definitions... - -HOST_ABSENT = -99 # the host is absent (special case defined by this module) - - -class HostModule(OpenNebulaModule): - - def __init__(self): - - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'), - im_mad_name=dict(type='str', default="kvm"), - vmm_mad_name=dict(type='str', default="kvm"), - cluster_id=dict(type='int', default=0), - cluster_name=dict(type='str'), - labels=dict(type='list', elements='str'), - template=dict(type='dict', aliases=['attributes']), - ) - - mutually_exclusive = [ - ['cluster_id', 'cluster_name'] - ] - - OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive) - - def allocate_host(self): - """ - Creates a host entry in OpenNebula - Returns: True on success, fails otherwise. - - """ - if not self.one.host.allocate(self.get_parameter('name'), - self.get_parameter('vmm_mad_name'), - self.get_parameter('im_mad_name'), - self.get_parameter('cluster_id')): - self.fail(msg="could not allocate host") - else: - self.result['changed'] = True - return True - - def wait_for_host_state(self, host, target_states): - """ - Utility method that waits for a host state. 
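        Implemented via self.wait_for_state(): polls
        one.host.info(host.ID).STATE until one of target_states is reached,
        failing fast when the host enters ERROR or MONITORING_ERROR
        (see the body below).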
- Args: - host: - target_states: - - """ - return self.wait_for_state('host', - lambda: self.one.host.info(host.ID).STATE, - lambda s: HOST_STATES(s).name, target_states, - invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]) - - def run(self, one, module, result): - - # Get the list of hosts - host_name = self.get_parameter("name") - host = self.get_host_by_name(host_name) - - # manage host state - desired_state = self.get_parameter('state') - if bool(host): - current_state = host.STATE - current_state_name = HOST_STATES(host.STATE).name - else: - current_state = HOST_ABSENT - current_state_name = "ABSENT" - - # apply properties - if desired_state == 'present': - if current_state == HOST_ABSENT: - self.allocate_host() - host = self.get_host_by_name(host_name) - self.wait_for_host_state(host, [HOST_STATES.MONITORED]) - elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]: - self.fail(msg="invalid host state %s" % current_state_name) - - elif desired_state == 'enabled': - if current_state == HOST_ABSENT: - self.allocate_host() - host = self.get_host_by_name(host_name) - self.wait_for_host_state(host, [HOST_STATES.MONITORED]) - elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]: - if one.host.status(host.ID, HOST_STATUS.ENABLED): - self.wait_for_host_state(host, [HOST_STATES.MONITORED]) - result['changed'] = True - else: - self.fail(msg="could not enable host") - elif current_state in [HOST_STATES.MONITORED]: - pass - else: - self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name) - - elif desired_state == 'disabled': - if current_state == HOST_ABSENT: - self.fail(msg='absent host cannot be put in disabled state') - elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: - if one.host.status(host.ID, HOST_STATUS.DISABLED): - self.wait_for_host_state(host, [HOST_STATES.DISABLED]) - result['changed'] = True - else: - self.fail(msg="could not disable host") - elif current_state in [HOST_STATES.DISABLED]: - pass - else: - self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name) - - elif desired_state == 'offline': - if current_state == HOST_ABSENT: - self.fail(msg='absent host cannot be placed in offline state') - elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: - if one.host.status(host.ID, HOST_STATUS.OFFLINE): - self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) - result['changed'] = True - else: - self.fail(msg="could not set host offline") - elif current_state in [HOST_STATES.OFFLINE]: - pass - else: - self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name) - - elif desired_state == 'absent': - if current_state != HOST_ABSENT: - if one.host.delete(host.ID): - result['changed'] = True - else: - self.fail(msg="could not delete host from cluster") - - # if we reach this point we can assume that the host was taken to the desired state - - if desired_state != "absent": - # manipulate or modify the template - desired_template_changes = self.get_parameter('template') - - if desired_template_changes is None: - desired_template_changes = dict() - - # complete the template with specific ansible parameters - if self.is_parameter('labels'): - desired_template_changes['LABELS'] = self.get_parameter('labels') - - if self.requires_template_update(host.TEMPLATE, desired_template_changes): - # setup the root element so that pyone will generate XML instead of attribute vector - 
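                # e.g. {'TEMPLATE': {'LABELS': ['gold', 'ssd'], 'RESERVED_CPU': -100}},
                # mirroring the 'Create a host and adjust its template' example above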
desired_template_changes = {"TEMPLATE": desired_template_changes} - if one.host.update(host.ID, desired_template_changes, 1): # merge the template - result['changed'] = True - else: - self.fail(msg="failed to update the host template") - - # the cluster - if host.CLUSTER_ID != self.get_parameter('cluster_id'): - if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID): - result['changed'] = True - else: - self.fail(msg="failed to update the host cluster") - - # return - self.exit() - - -def main(): - HostModule().run_module() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py deleted file mode 100644 index 5a80306f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py +++ /dev/null @@ -1,423 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2018, Milan Ilic - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a clone of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: one_image -short_description: Manages OpenNebula images -description: - - Manages OpenNebula images -requirements: - - pyone -options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the C(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the C(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the C(ONE_PASSWORD) environment variable is used. - type: str - id: - description: - - A C(id) of the image you would like to manage. - type: int - name: - description: - - A C(name) of the image you would like to manage. - type: str - state: - description: - - C(present) - state that is used to manage the image - - C(absent) - delete the image - - C(cloned) - clone the image - - C(renamed) - rename the image to the C(new_name) - choices: ["present", "absent", "cloned", "renamed"] - default: present - type: str - enabled: - description: - - Whether the image should be enabled or disabled. - type: bool - new_name: - description: - - A name that will be assigned to the existing or new image. - - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'. 
- type: str -author: - - "Milan Ilic (@ilicmilan)" -''' - -EXAMPLES = ''' -- name: Fetch the IMAGE by id - community.general.one_image: - id: 45 - register: result - -- name: Print the IMAGE properties - ansible.builtin.debug: - var: result - -- name: Rename existing IMAGE - community.general.one_image: - id: 34 - state: renamed - new_name: bar-image - -- name: Disable the IMAGE by id - community.general.one_image: - id: 37 - enabled: no - -- name: Enable the IMAGE by name - community.general.one_image: - name: bar-image - enabled: yes - -- name: Clone the IMAGE by name - community.general.one_image: - name: bar-image - state: cloned - new_name: bar-image-clone - register: result - -- name: Delete the IMAGE by id - community.general.one_image: - id: '{{ result.id }}' - state: absent -''' - -RETURN = ''' -id: - description: image id - type: int - returned: success - sample: 153 -name: - description: image name - type: str - returned: success - sample: app1 -group_id: - description: image's group id - type: int - returned: success - sample: 1 -group_name: - description: image's group name - type: str - returned: success - sample: one-users -owner_id: - description: image's owner id - type: int - returned: success - sample: 143 -owner_name: - description: image's owner name - type: str - returned: success - sample: ansible-test -state: - description: state of image instance - type: str - returned: success - sample: READY -used: - description: is image in use - type: bool - returned: success - sample: true -running_vms: - description: count of running vms that use this image - type: int - returned: success - sample: 7 -''' - -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_image(module, client, predicate): - # Filter -2 means fetch all images user can Use - pool = client.imagepool.info(-2, -1, -1, -1) - - for image in pool.IMAGE: - if predicate(image): - return image - - return None - - -def get_image_by_name(module, client, image_name): - return get_image(module, client, lambda image: (image.NAME == image_name)) - - -def get_image_by_id(module, client, image_id): - return get_image(module, client, lambda image: (image.ID == image_id)) - - -def get_image_instance(module, client, requested_id, requested_name): - if requested_id: - return get_image_by_id(module, client, requested_id) - else: - return get_image_by_name(module, client, requested_name) - - -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] - - -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } - - return info - - -def wait_for_state(module, client, image_id, wait_timeout, state_predicate): - import time - start_time = time.time() - - while (time.time() - start_time) < wait_timeout: - image = client.image.info(image_id) - state = image.STATE - - if state_predicate(state): - return image - - time.sleep(1) - - module.fail_json(msg="Wait timeout has expired!") - - -def wait_for_ready(module, client, image_id, wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) - - -def wait_for_delete(module, client, image_id, 
wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) - - -def enable_image(module, client, image, enable): - image = client.image.info(image.ID) - changed = False - - state = image.STATE - - if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: - if enable: - module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") - else: - module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") - - if ((enable and state != IMAGE_STATES.index('READY')) or - (not enable and state != IMAGE_STATES.index('DISABLED'))): - changed = True - - if changed and not module.check_mode: - client.image.enable(image.ID, enable) - - result = get_image_info(image) - result['changed'] = changed - - return result - - -def clone_image(module, client, image, new_name): - if new_name is None: - new_name = "Copy of " + image.NAME - - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - result = get_image_info(tmp_image) - result['changed'] = False - return result - - if image.STATE == IMAGE_STATES.index('DISABLED'): - module.fail_json(msg="Cannot clone DISABLED image") - - if not module.check_mode: - new_id = client.image.clone(image.ID, new_name) - wait_for_ready(module, client, new_id) - image = client.image.info(new_id) - - result = get_image_info(image) - result['changed'] = True - - return result - - -def rename_image(module, client, image, new_name): - if new_name is None: - module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") - - if new_name == image.NAME: - result = get_image_info(image) - result['changed'] = False - return result - - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) - - if not module.check_mode: - client.image.rename(image.ID, new_name) - - result = get_image_info(image) - result['changed'] = True - return result - - -def delete_image(module, client, image): - - if not image: - return {'changed': False} - - if image.RUNNING_VMS > 0: - module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") - - if not module.check_mode: - client.image.delete(image.ID) - wait_for_delete(module, client, image.ID) - - return {'changed': True} - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONE_URL') - - if not username: - username = os.environ.get('ONE_USERNAME') - - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not(url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) - - -def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "id": {"required": False, "type": "int"}, - "name": {"required": False, "type": "str"}, - "state": { - "default": "present", - "choices": ['present', 'absent', 'cloned', 'renamed'], - "type": "str" - }, - "enabled": {"required": False, "type": "bool"}, - "new_name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['id', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - id = params.get('id') - name = params.get('name') - state = params.get('state') - enabled = params.get('enabled') - new_name = params.get('new_name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - result = {} - - if not id and state == 'renamed': - module.fail_json(msg="Option 'id' is required when the state is 'renamed'") - - image = get_image_instance(module, client, id, name) - if not image and state != 'absent': - if id: - module.fail_json(msg="There is no image with id=" + str(id)) - else: - module.fail_json(msg="There is no image with name=" + name) - - if state == 'absent': - result = delete_image(module, client, image) - else: - result = get_image_info(image) - changed = False - result['changed'] = False - - if enabled is not None: - result = enable_image(module, client, image, enabled) - if state == "cloned": - result = clone_image(module, client, image, new_name) - elif state == "renamed": - result = rename_image(module, client, image, new_name) - - changed = changed or result['changed'] - result['changed'] = changed - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py deleted file mode 100644 index e03b8ad7..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py +++ /dev/null @@ -1,289 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2018, Milan Ilic - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the 
Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a clone of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: one_image_info -short_description: Gather information on OpenNebula images -description: - - Gather information on OpenNebula images. - - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change. -requirements: - - pyone -options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the C(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the C(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the C(ONE_PASSWORD) environment variable is used. - type: str - ids: - description: - - A list of images ids whose facts you want to gather. - aliases: ['id'] - type: list - elements: str - name: - description: - - A C(name) of the image whose facts will be gathered. - - If the C(name) begins with '~' the C(name) will be used as regex pattern - - which restricts the list of images (whose facts will be returned) whose names match specified regex. - - Also, if the C(name) begins with '~*' case-insensitive matching will be performed. - - See examples for more details. 
- type: str -author: - - "Milan Ilic (@ilicmilan)" - - "Jan Meerkamp (@meerkampdvv)" -''' - -EXAMPLES = ''' -- name: Gather facts about all images - community.general.one_image_info: - register: result - -- name: Print all images facts - ansible.builtin.debug: - msg: result - -- name: Gather facts about an image using ID - community.general.one_image_info: - ids: - - 123 - -- name: Gather facts about an image using the name - community.general.one_image_info: - name: 'foo-image' - register: foo_image - -- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*' - community.general.one_image_info: - name: '~app-image-.*' - register: app_images - -- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases - community.general.one_image_info: - name: '~*foo-image-.*' - register: foo_images -''' - -RETURN = ''' -images: - description: A list of images info - type: complex - returned: success - contains: - id: - description: image id - type: int - sample: 153 - name: - description: image name - type: str - sample: app1 - group_id: - description: image's group id - type: int - sample: 1 - group_name: - description: image's group name - type: str - sample: one-users - owner_id: - description: image's owner id - type: int - sample: 143 - owner_name: - description: image's owner name - type: str - sample: ansible-test - state: - description: state of image instance - type: str - sample: READY - used: - description: is image in use - type: bool - sample: true - running_vms: - description: count of running vms that use this image - type: int - sample: 7 -''' - -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_all_images(client): - pool = client.imagepool.info(-2, -1, -1, -1) - # Filter -2 means fetch all images user can Use - - return pool - - -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] - - -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } - return info - - -def get_images_by_ids(module, client, ids): - images = [] - pool = get_all_images(client) - - for image in pool.IMAGE: - if str(image.ID) in ids: - images.append(image) - ids.remove(str(image.ID)) - if len(ids) == 0: - break - - if len(ids) > 0: - module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) - - return images - - -def get_images_by_name(module, client, name_pattern): - - images = [] - pattern = None - - pool = get_all_images(client) - - if name_pattern.startswith('~'): - import re - if name_pattern[1] == '*': - pattern = re.compile(name_pattern[2:], re.IGNORECASE) - else: - pattern = re.compile(name_pattern[1:]) - - for image in pool.IMAGE: - if pattern is not None: - if pattern.match(image.NAME): - images.append(image) - elif name_pattern == image.NAME: - images.append(image) - break - - # if the specific name is indicated - if pattern is None and len(images) == 0: - module.fail_json(msg="There is no IMAGE with name=" + name_pattern) - - return images - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') 
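    # explicit module parameters win; the ONE_* environment variables below are only a fallback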
- password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONE_URL') - - if not username: - username = os.environ.get('ONE_USERNAME') - - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not(url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) - - -def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"}, - "name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['ids', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - ids = params.get('ids') - name = params.get('name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - if ids: - images = get_images_by_ids(module, client, ids) - elif name: - images = get_images_by_name(module, client, name) - else: - images = get_all_images(client).IMAGE - - result = { - 'images': [get_image_info(image) for image in images], - } - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py deleted file mode 100644 index 68f8398f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py +++ /dev/null @@ -1,768 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2017, Milan Ilic - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: one_service -short_description: Deploy and manage OpenNebula services -description: - - Manage OpenNebula services -options: - api_url: - description: - - URL of the OpenNebula OneFlow API server. - - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. - - If not set then the value of the ONEFLOW_URL environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula OneFlow API server. 
If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used. - type: str - template_name: - description: - - Name of service template to use to create a new instance of a service - type: str - template_id: - description: - - ID of a service template to use to create a new instance of a service - type: int - service_id: - description: - - ID of a service instance that you would like to manage - type: int - service_name: - description: - - Name of a service instance that you would like to manage - type: str - unique: - description: - - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when - - instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below. - type: bool - default: no - state: - description: - - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name). - - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name). - choices: ["present", "absent"] - default: present - type: str - mode: - description: - - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others. - type: str - owner_id: - description: - - ID of the user which will be set as the owner of the service - type: int - group_id: - description: - - ID of the group which will be set as the group of the service - type: int - wait: - description: - - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING - type: bool - default: no - wait_timeout: - description: - - How long before wait gives up, in seconds - default: 300 - type: int - custom_attrs: - description: - - Dictionary of key/value custom attributes which will be used when instantiating a new service. 
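For orientation, a sketch of the request body that create_service() below assembles from these attributes before POSTing to the OneFlow API (attribute names and values here are purely illustrative; every value is passed through str() first):

data = {
    "action": {
        "perform": "instantiate",
        "params": {
            "merge_template": {
                "custom_attrs_values": {"public_network_id": "21"},  # stringified custom_attrs
                "name": "app1",                                      # service_name
            }
        }
    }
}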
- default: {} - type: dict - role: - description: - - Name of the role whose cardinality should be changed - type: str - cardinality: - description: - - Number of VMs for the specified role - type: int - force: - description: - - Force the new cardinality even if it is outside the limits - type: bool - default: no -author: - - "Milan Ilic (@ilicmilan)" -''' - -EXAMPLES = ''' -- name: Instantiate a new service - community.general.one_service: - template_id: 90 - register: result - -- name: Print service properties - ansible.builtin.debug: - msg: result - -- name: Instantiate a new service with specified service_name, service group and mode - community.general.one_service: - template_name: 'app1_template' - service_name: 'app1' - group_id: 1 - mode: '660' - -- name: Instantiate a new service with template_id and pass custom_attrs dict - community.general.one_service: - template_id: 90 - custom_attrs: - public_network_id: 21 - private_network_id: 26 - -- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing - community.general.one_service: - template_id: 53 - service_name: 'foo' - unique: yes - -- name: Delete a service by ID - community.general.one_service: - service_id: 153 - state: absent - -- name: Get service info - community.general.one_service: - service_id: 153 - register: service_info - -- name: Change service owner, group and mode - community.general.one_service: - service_name: 'app2' - owner_id: 34 - group_id: 113 - mode: '600' - -- name: Instantiate service and wait for it to become RUNNING - community.general.one_service: - template_id: 43 - service_name: 'foo1' - -- name: Wait service to become RUNNING - community.general.one_service: - service_id: 112 - wait: yes - -- name: Change role cardinality - community.general.one_service: - service_id: 153 - role: bar - cardinality: 5 - -- name: Change role cardinality and wait for it to be applied - community.general.one_service: - service_id: 112 - role: foo - cardinality: 7 - wait: yes -''' - -RETURN = ''' -service_id: - description: service id - type: int - returned: success - sample: 153 -service_name: - description: service name - type: str - returned: success - sample: app1 -group_id: - description: service's group id - type: int - returned: success - sample: 1 -group_name: - description: service's group name - type: str - returned: success - sample: one-users -owner_id: - description: service's owner id - type: int - returned: success - sample: 143 -owner_name: - description: service's owner name - type: str - returned: success - sample: ansible-test -state: - description: state of service instance - type: str - returned: success - sample: RUNNING -mode: - description: service's mode - type: int - returned: success - sample: 660 -roles: - description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids - type: list - returned: success - sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]}, - {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]' -''' - -import os -import sys -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import open_url - -STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE", - "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN") - - -def get_all_templates(module, auth): - try: - all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, 
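                                 # OneFlow is a REST endpoint with HTTP basic auth, unlike the XML-RPC session string used by the other one_* modules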
url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg=str(e)) - - return module.from_json(all_templates.read()) - - -def get_template(module, auth, pred): - all_templates_dict = get_all_templates(module, auth) - - found = 0 - found_template = None - template_name = '' - - if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]: - for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]: - if pred(template): - found = found + 1 - found_template = template - template_name = template["NAME"] - - if found <= 0: - return None - elif found > 1: - module.fail_json(msg="There is no template with unique name: " + template_name) - else: - return found_template - - -def get_all_services(module, auth): - try: - response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg=str(e)) - - return module.from_json(response.read()) - - -def get_service(module, auth, pred): - all_services_dict = get_all_services(module, auth) - - found = 0 - found_service = None - service_name = '' - - if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]: - for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]: - if pred(service): - found = found + 1 - found_service = service - service_name = service["NAME"] - - # fail if there are more services with same name - if found > 1: - module.fail_json(msg="There are multiple services with a name: '" + - service_name + "'. You have to use a unique service name or use 'service_id' instead.") - elif found <= 0: - return None - else: - return found_service - - -def get_service_by_id(module, auth, service_id): - return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None - - -def get_service_by_name(module, auth, service_name): - return get_service(module, auth, lambda service: (service["NAME"] == service_name)) - - -def get_service_info(module, auth, service): - - result = { - "service_id": int(service["ID"]), - "service_name": service["NAME"], - "group_id": int(service["GID"]), - "group_name": service["GNAME"], - "owner_id": int(service["UID"]), - "owner_name": service["UNAME"], - "state": STATES[service["TEMPLATE"]["BODY"]["state"]] - } - - roles_status = service["TEMPLATE"]["BODY"]["roles"] - roles = [] - for role in roles_status: - nodes_ids = [] - if "nodes" in role: - for node in role["nodes"]: - nodes_ids.append(node["deploy_id"]) - roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids}) - - result["roles"] = roles - result["mode"] = int(parse_service_permissions(service)) - - return result - - -def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout): - # make sure that the values in custom_attrs dict are strings - custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items()) - - data = { - "action": { - "perform": "instantiate", - "params": { - "merge_template": { - "custom_attrs_values": custom_attrs_with_str, - "name": service_name - } - } - } - } - - try: - response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST", - data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg=str(e)) - - service_result = 
module.from_json(response.read())["DOCUMENT"] - - return service_result - - -def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout): - import time - start_time = time.time() - - while (time.time() - start_time) < wait_timeout: - try: - status_result = open_url(auth.url + "/service/" + str(service_id), method="GET", - force_basic_auth=True, url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg="Request for service status has failed. Error message: " + str(e)) - - status_result = module.from_json(status_result.read()) - service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"] - - if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]: - return status_result["DOCUMENT"] - elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]: - log_message = '' - for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]: - if log_info["severity"] == "E": - log_message = log_message + log_info["message"] - break - - module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message) - - time.sleep(1) - - module.fail_json(msg="Wait timeout has expired") - - -def change_service_permissions(module, auth, service_id, permissions): - - data = { - "action": { - "perform": "chmod", - "params": {"octet": permissions} - } - } - - try: - status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) - except Exception as e: - module.fail_json(msg=str(e)) - - -def change_service_owner(module, auth, service_id, owner_id): - data = { - "action": { - "perform": "chown", - "params": {"owner_id": owner_id} - } - } - - try: - status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) - except Exception as e: - module.fail_json(msg=str(e)) - - -def change_service_group(module, auth, service_id, group_id): - - data = { - "action": { - "perform": "chgrp", - "params": {"group_id": group_id} - } - } - - try: - status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) - except Exception as e: - module.fail_json(msg=str(e)) - - -def change_role_cardinality(module, auth, service_id, role, cardinality, force): - - data = { - "cardinality": cardinality, - "force": force - } - - try: - status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT", - force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) - except Exception as e: - module.fail_json(msg=str(e)) - - if status_result.getcode() != 204: - module.fail_json(msg="Failed to change cardinality for role: " + role + ". 
Return code: " + str(status_result.getcode())) - - -def check_change_service_owner(module, service, owner_id): - old_owner_id = int(service["UID"]) - - return old_owner_id != owner_id - - -def check_change_service_group(module, service, group_id): - old_group_id = int(service["GID"]) - - return old_group_id != group_id - - -def parse_service_permissions(service): - perm_dict = service["PERMISSIONS"] - ''' - This is the structure of the 'PERMISSIONS' dictionary: - - "PERMISSIONS": { - "OWNER_U": "1", - "OWNER_M": "1", - "OWNER_A": "0", - "GROUP_U": "0", - "GROUP_M": "0", - "GROUP_A": "0", - "OTHER_U": "0", - "OTHER_M": "0", - "OTHER_A": "0" - } - ''' - - owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"]) - group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"]) - other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"]) - - permissions = str(owner_octal) + str(group_octal) + str(other_octal) - - return permissions - - -def check_change_service_permissions(module, service, permissions): - old_permissions = parse_service_permissions(service) - - return old_permissions != permissions - - -def check_change_role_cardinality(module, service, role_name, cardinality): - roles_list = service["TEMPLATE"]["BODY"]["roles"] - - for role in roles_list: - if role["name"] == role_name: - return int(role["cardinality"]) != cardinality - - module.fail_json(msg="There is no role with name: " + role_name) - - -def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout): - if not service_name: - service_name = '' - changed = False - service = None - - if unique: - service = get_service_by_name(module, auth, service_name) - - if not service: - if not module.check_mode: - service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout) - changed = True - - # if check_mode=true and there would be changes, service doesn't exist and we can not get it - if module.check_mode and changed: - return {"changed": True} - - result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait, - wait_timeout=wait_timeout, permissions=permissions, service=service) - - if result["changed"]: - changed = True - - result["changed"] = changed - - return result - - -def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None, - role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None): - - changed = False - - if not service: - service = get_service_by_id(module, auth, service_id) - else: - service_id = service["ID"] - - if not service: - module.fail_json(msg="There is no service with id: " + str(service_id)) - - if owner_id: - if check_change_service_owner(module, service, owner_id): - if not module.check_mode: - change_service_owner(module, auth, service_id, owner_id) - changed = True - if group_id: - if check_change_service_group(module, service, group_id): - if not module.check_mode: - change_service_group(module, auth, service_id, group_id) - changed = True - if permissions: - if check_change_service_permissions(module, service, permissions): - if not module.check_mode: - change_service_permissions(module, auth, service_id, permissions) - changed = True - - if role: - if check_change_role_cardinality(module, service, role, cardinality): - if not module.check_mode: - 
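                    # delegated to the PUT /service/<id>/role/<name> endpoint wrapped by change_role_cardinality() above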
change_role_cardinality(module, auth, service_id, role, cardinality, force) - changed = True - - if wait and not module.check_mode: - service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout) - - # if something has changed, fetch service info again - if changed: - service = get_service_by_id(module, auth, service_id) - - service_info = get_service_info(module, auth, service) - service_info["changed"] = changed - - return service_info - - -def delete_service(module, auth, service_id): - service = get_service_by_id(module, auth, service_id) - if not service: - return {"changed": False} - - service_info = get_service_info(module, auth, service) - - service_info["changed"] = True - - if module.check_mode: - return service_info - - try: - result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password) - except Exception as e: - module.fail_json(msg="Service deletion has failed. Error message: " + str(e)) - - return service_info - - -def get_template_by_name(module, auth, template_name): - return get_template(module, auth, lambda template: (template["NAME"] == template_name)) - - -def get_template_by_id(module, auth, template_id): - return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None - - -def get_template_id(module, auth, requested_id, requested_name): - template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name) - - if template: - return template["ID"] - - return None - - -def get_service_id_by_name(module, auth, service_name): - service = get_service_by_name(module, auth, service_name) - - if service: - return service["ID"] - - return None - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONEFLOW_URL') - - if not username: - username = os.environ.get('ONEFLOW_USERNAME') - - if not password: - password = os.environ.get('ONEFLOW_PASSWORD') - - if not(url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'user', 'password')) - - return auth_params(url=url, user=username, password=password) - - -def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "service_name": {"required": False, "type": "str"}, - "service_id": {"required": False, "type": "int"}, - "template_name": {"required": False, "type": "str"}, - "template_id": {"required": False, "type": "int"}, - "state": { - "default": "present", - "choices": ['present', 'absent'], - "type": "str" - }, - "mode": {"required": False, "type": "str"}, - "owner_id": {"required": False, "type": "int"}, - "group_id": {"required": False, "type": "int"}, - "unique": {"default": False, "type": "bool"}, - "wait": {"default": False, "type": "bool"}, - "wait_timeout": {"default": 300, "type": "int"}, - "custom_attrs": {"default": {}, "type": "dict"}, - "role": {"required": False, "type": "str"}, - "cardinality": {"required": False, "type": "int"}, - "force": {"default": False, "type": "bool"} - } - - module = AnsibleModule(argument_spec=fields, 
- mutually_exclusive=[ - ['template_id', 'template_name', 'service_id'], - ['service_id', 'service_name'], - ['template_id', 'template_name', 'role'], - ['template_id', 'template_name', 'cardinality'], - ['service_id', 'custom_attrs'] - ], - required_together=[['role', 'cardinality']], - supports_check_mode=True) - - auth = get_connection_info(module) - params = module.params - service_name = params.get('service_name') - service_id = params.get('service_id') - - requested_template_id = params.get('template_id') - requested_template_name = params.get('template_name') - state = params.get('state') - permissions = params.get('mode') - owner_id = params.get('owner_id') - group_id = params.get('group_id') - unique = params.get('unique') - wait = params.get('wait') - wait_timeout = params.get('wait_timeout') - custom_attrs = params.get('custom_attrs') - role = params.get('role') - cardinality = params.get('cardinality') - force = params.get('force') - - template_id = None - - if requested_template_id or requested_template_name: - template_id = get_template_id(module, auth, requested_template_id, requested_template_name) - if not template_id: - if requested_template_id: - module.fail_json(msg="There is no template with template_id: " + str(requested_template_id)) - elif requested_template_name: - module.fail_json(msg="There is no template with name: " + requested_template_name) - - if unique and not service_name: - module.fail_json(msg="You cannot use unique without passing service_name!") - - if template_id and state == 'absent': - module.fail_json(msg="State absent is not valid for template") - - if template_id and state == 'present': # Instantiate a service - result = create_service_and_operation(module, auth, template_id, service_name, owner_id, - group_id, permissions, custom_attrs, unique, wait, wait_timeout) - else: - if not (service_id or service_name): - module.fail_json(msg="To manage the service at least the service id or service name should be specified!") - if custom_attrs: - module.fail_json(msg="You can only set custom_attrs when instantiate service!") - - if not service_id: - service_id = get_service_id_by_name(module, auth, service_name) - # The task should be failed when we want to manage a non-existent service identified by its name - if not service_id and state == 'present': - module.fail_json(msg="There is no service with name: " + service_name) - - if state == 'absent': - result = delete_service(module, auth, service_id) - else: - result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_template.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_template.py deleted file mode 100644 index b1d2c69c..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_template.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2021, Georg Gadinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: one_template - -short_description: Manages OpenNebula templates - -version_added: 2.4.0 - -requirements: - - pyone - -description: - - "Manages OpenNebula 
templates." - -options: - id: - description: - - A I(id) of the template you would like to manage. If not set then a - - new template will be created with the given I(name). - type: int - name: - description: - - A I(name) of the template you would like to manage. If a template with - - the given name does not exist it will be created, otherwise it will be - - managed by this module. - type: str - template: - description: - - A string containing the template contents. - type: str - state: - description: - - C(present) - state that is used to manage the template. - - C(absent) - delete the template. - choices: ["present", "absent"] - default: present - type: str - -notes: - - Supports C(check_mode). Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change. - -extends_documentation_fragment: - - community.general.opennebula - -author: - - "Georg Gadinger (@nilsding)" -''' - -EXAMPLES = ''' -- name: Fetch the TEMPLATE by id - community.general.one_template: - id: 6459 - register: result - -- name: Print the TEMPLATE properties - ansible.builtin.debug: - var: result - -- name: Fetch the TEMPLATE by name - community.general.one_template: - name: tf-prd-users-workerredis-p6379a - register: result - -- name: Create a new or update an existing TEMPLATE - community.general.one_template: - name: generic-opensuse - template: | - CONTEXT = [ - HOSTNAME = "generic-opensuse" - ] - CPU = "1" - CUSTOM_ATTRIBUTE = "" - DISK = [ - CACHE = "writeback", - DEV_PREFIX = "sd", - DISCARD = "unmap", - IMAGE = "opensuse-leap-15.2", - IMAGE_UNAME = "oneadmin", - IO = "threads", - SIZE = "" ] - MEMORY = "2048" - NIC = [ - MODEL = "virtio", - NETWORK = "testnet", - NETWORK_UNAME = "oneadmin" ] - OS = [ - ARCH = "x86_64", - BOOT = "disk0" ] - SCHED_REQUIREMENTS = "CLUSTER_ID=\\"100\\"" - VCPU = "2" - -- name: Delete the TEMPLATE by id - community.general.one_template: - id: 6459 - state: absent -''' - -RETURN = ''' -id: - description: template id - type: int - returned: when I(state=present) - sample: 153 -name: - description: template name - type: str - returned: when I(state=present) - sample: app1 -template: - description: the parsed template - type: dict - returned: when I(state=present) -group_id: - description: template's group id - type: int - returned: when I(state=present) - sample: 1 -group_name: - description: template's group name - type: str - returned: when I(state=present) - sample: one-users -owner_id: - description: template's owner id - type: int - returned: when I(state=present) - sample: 143 -owner_name: - description: template's owner name - type: str - returned: when I(state=present) - sample: ansible-test -''' - - -from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule - - -class TemplateModule(OpenNebulaModule): - def __init__(self): - argument_spec = dict( - id=dict(type='int', required=False), - name=dict(type='str', required=False), - state=dict(type='str', choices=['present', 'absent'], default='present'), - template=dict(type='str', required=False), - ) - - mutually_exclusive = [ - ['id', 'name'] - ] - - required_one_of = [('id', 'name')] - - required_if = [ - ['state', 'present', ['template']] - ] - - OpenNebulaModule.__init__(self, - argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_one_of=required_one_of, - required_if=required_if) - - def run(self, one, module, result): - params = module.params - id = params.get('id') - name = 
params.get('name') - desired_state = params.get('state') - template_data = params.get('template') - - self.result = {} - - template = self.get_template_instance(id, name) - needs_creation = False - if not template and desired_state != 'absent': - if id: - module.fail_json(msg="There is no template with id=" + str(id)) - else: - needs_creation = True - - if desired_state == 'absent': - self.result = self.delete_template(template) - else: - if needs_creation: - self.result = self.create_template(name, template_data) - else: - self.result = self.update_template(template, template_data) - - self.exit() - - def get_template(self, predicate): - # -3 means "Resources belonging to the user" - # the other two parameters are used for pagination, -1 for both essentially means "return all" - pool = self.one.templatepool.info(-3, -1, -1) - - for template in pool.VMTEMPLATE: - if predicate(template): - return template - - return None - - def get_template_by_id(self, template_id): - return self.get_template(lambda template: (template.ID == template_id)) - - def get_template_by_name(self, name): - return self.get_template(lambda template: (template.NAME == name)) - - def get_template_instance(self, requested_id, requested_name): - if requested_id: - return self.get_template_by_id(requested_id) - else: - return self.get_template_by_name(requested_name) - - def get_template_info(self, template): - info = { - 'id': template.ID, - 'name': template.NAME, - 'template': template.TEMPLATE, - 'user_name': template.UNAME, - 'user_id': template.UID, - 'group_name': template.GNAME, - 'group_id': template.GID, - } - - return info - - def create_template(self, name, template_data): - if not self.module.check_mode: - self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data) - - result = self.get_template_info(self.get_template_by_name(name)) - result['changed'] = True - - return result - - def update_template(self, template, template_data): - if not self.module.check_mode: - # 0 = replace the whole template - self.one.template.update(template.ID, template_data, 0) - - result = self.get_template_info(self.get_template_by_id(template.ID)) - if self.module.check_mode: - # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. 
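                # check-mode callers therefore see changed=true even when the update would be a no-op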
- result['changed'] = True - else: - # if the previous parsed template data is not equal to the updated one, this has changed - result['changed'] = template.TEMPLATE != result['template'] - - return result - - def delete_template(self, template): - if not template: - return {'changed': False} - - if not self.module.check_mode: - self.one.template.delete(template.ID) - - return {'changed': True} - - -def main(): - TemplateModule().run_module() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py deleted file mode 100644 index 86061f73..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py +++ /dev/null @@ -1,1635 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2017, Milan Ilic -(c) 2019, Jan Meerkamp - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: one_vm -short_description: Creates or terminates OpenNebula instances -description: - - Manages OpenNebula instances -requirements: - - pyone -options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the C(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the C(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the C(ONE_PASSWORD) environment variable is used. - - if both I(api_username) or I(api_password) are not set, then it will try - - authenticate with ONE auth file. Default path is "~/.one/one_auth". - - Set environment variable C(ONE_AUTH) to override this path. - type: str - template_name: - description: - - Name of VM template to use to create a new instace - type: str - template_id: - description: - - ID of a VM template to use to create a new instance - type: int - vm_start_on_hold: - description: - - Set to true to put vm on hold while creating - default: False - type: bool - instance_ids: - description: - - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff) - aliases: ['ids'] - type: list - elements: int - state: - description: - - C(present) - create instances from a template specified with C(template_id)/C(template_name). 
- - C(running) - run instances - - C(poweredoff) - power-off instances - - C(rebooted) - reboot instances - - C(absent) - terminate instances - choices: ["present", "absent", "running", "rebooted", "poweredoff"] - default: present - type: str - hard: - description: - - Reboot, power-off or terminate instances C(hard) - default: no - type: bool - wait: - description: - - Wait for the instance to reach its desired state before returning. Keep - - in mind if you are waiting for instance to be in running state it - - doesn't mean that you will be able to SSH on that machine only that - - boot process have started on that instance, see 'wait_for' example for - - details. - default: yes - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds - default: 300 - type: int - attributes: - description: - - A dictionary of key/value attributes to add to new instances, or for - - setting C(state) of instances with these attributes. - - Keys are case insensitive and OpenNebula automatically converts them to upper case. - - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed. - - C(#) character(s) can be appended to the C(NAME) and the module will automatically add - - indexes to the names of VMs. - - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),... - - When used with C(count_attributes) and C(exact_count) the module will - - match the base name without the index part. - default: {} - type: dict - labels: - description: - - A list of labels to associate with new instances, or for setting - - C(state) of instances with these labels. - default: [] - type: list - elements: str - count_attributes: - description: - - A dictionary of key/value attributes that can only be used with - - C(exact_count) to determine how many nodes based on a specific - - attributes criteria should be deployed. This can be expressed in - - multiple ways and is shown in the EXAMPLES section. - type: dict - count_labels: - description: - - A list of labels that can only be used with C(exact_count) to determine - - how many nodes based on a specific labels criteria should be deployed. - - This can be expressed in multiple ways and is shown in the EXAMPLES - - section. - type: list - elements: str - count: - description: - - Number of instances to launch - default: 1 - type: int - exact_count: - description: - - Indicates how many instances that match C(count_attributes) and - - C(count_labels) parameters should be deployed. Instances are either - - created or terminated based on this value. - - NOTE':' Instances with the least IDs will be terminated first. - type: int - mode: - description: - - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others. - type: str - owner_id: - description: - - ID of the user which will be set as the owner of the instance - type: int - group_id: - description: - - ID of the group which will be set as the group of the instance - type: int - memory: - description: - - The size of the memory for new instances (in MB, GB, ...) - type: str - disk_size: - description: - - The size of the disk created for new instances (in MB, GB, TB,...). - - NOTE':' If The Template hats Multiple Disks the Order of the Sizes is - - matched against the order specified in C(template_id)/C(template_name). - type: list - elements: str - cpu: - description: - - Percentage of CPU divided by 100 required for the new instance. 
Half a - - processor is written 0.5. - type: float - vcpu: - description: - - Number of CPUs (cores) new VM will have. - type: int - networks: - description: - - A list of dictionaries with network parameters. See examples for more details. - default: [] - type: list - elements: dict - disk_saveas: - description: - - Creates an image from a VM disk. - - It is a dictionary where you have to specify C(name) of the new image. - - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0. - - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed) - - and the VM has to be in the C(poweredoff) state. - - Also this operation will fail if an image with specified C(name) already exists. - type: dict - persistent: - description: - - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy. - default: NO - type: bool - version_added: '0.2.0' - datastore_id: - description: - - Name of Datastore to use to create a new instace - version_added: '0.2.0' - type: int - datastore_name: - description: - - Name of Datastore to use to create a new instace - version_added: '0.2.0' - type: str -author: - - "Milan Ilic (@ilicmilan)" - - "Jan Meerkamp (@meerkampdvv)" -''' - - -EXAMPLES = ''' -- name: Create a new instance - community.general.one_vm: - template_id: 90 - register: result - -- name: Print VM properties - ansible.builtin.debug: - msg: result - -- name: Deploy a new VM on hold - community.general.one_vm: - template_name: 'app1_template' - vm_start_on_hold: 'True' - -- name: Deploy a new VM and set its name to 'foo' - community.general.one_vm: - template_name: 'app1_template' - attributes: - name: foo - -- name: Deploy a new VM and set its group_id and mode - community.general.one_vm: - template_id: 90 - group_id: 16 - mode: 660 - -- name: Deploy a new VM as persistent - community.general.one_vm: - template_id: 90 - persistent: yes - -- name: Change VM's permissions to 640 - community.general.one_vm: - instance_ids: 5 - mode: 640 - -- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks - community.general.one_vm: - template_id: 15 - disk_size: 35.2 GB - memory: 4 GB - vcpu: 4 - count: 2 - networks: - - NETWORK_ID: 27 - - NETWORK: "default-network" - NETWORK_UNAME: "app-user" - SECURITY_GROUPS: "120,124" - - NETWORK_ID: 27 - SECURITY_GROUPS: "10" - -- name: Deploy a new instance which uses a Template with two Disks - community.general.one_vm: - template_id: 42 - disk_size: - - 35.2 GB - - 50 GB - memory: 4 GB - vcpu: 4 - count: 1 - networks: - - NETWORK_ID: 27 - -- name: "Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'" - community.general.one_vm: - template_id: 53 - attributes: - name: foo - bar: bar1 - -- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed" - community.general.one_vm: - template_id: 53 - attributes: - foo1: app1 - foo2: app2 - exact_count: 2 - count_attributes: - foo1: app1 - foo2: app2 - -- name: Enforce that 4 instances with an attribute 'bar' are deployed - community.general.one_vm: - template_id: 53 - attributes: - name: app - bar: bar2 - exact_count: 4 - count_attributes: - bar: - -# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##' -# Names will be: fooapp-00 and fooapp-01 -- name: Deploy 2 new instances - community.general.one_vm: - template_id: 53 - attributes: - name: fooapp-## - foo: bar - labels: - 
- app1 - - app2 - count: 2 - -# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###' -# Names will be: fooapp-002 and fooapp-003 -- name: Deploy 2 new instances - community.general.one_vm: - template_id: 53 - attributes: - name: fooapp-### - app: app1 - count: 2 - -# Reboot all instances with name in format 'fooapp-#' -# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted -- name: Reboot all instances with names in a certain format - community.general.one_vm: - attributes: - name: fooapp-# - state: rebooted - -# Enforce that only 1 instance with name in format 'fooapp-#' is deployed -# The task will delete oldest instances, so only the 'fooapp-003' will remain -- name: Enforce that only 1 instance with name in a certain format is deployed - community.general.one_vm: - template_id: 53 - exact_count: 1 - count_attributes: - name: fooapp-# - -- name: Deploy an new instance with a network - community.general.one_vm: - template_id: 53 - networks: - - NETWORK_ID: 27 - register: vm - -- name: Wait for SSH to come up - ansible.builtin.wait_for_connection: - delegate_to: '{{ vm.instances[0].networks[0].ip }}' - -- name: Terminate VMs by ids - community.general.one_vm: - instance_ids: - - 153 - - 160 - state: absent - -- name: Reboot all VMs that have labels 'foo' and 'app1' - community.general.one_vm: - labels: - - foo - - app1 - state: rebooted - -- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'" - community.general.one_vm: - attributes: - name: foo - app: bar - register: results - -- name: Deploy 2 new instances with labels 'foo1' and 'foo2' - community.general.one_vm: - template_name: app_template - labels: - - foo1 - - foo2 - count: 2 - -- name: Enforce that only 1 instance with label 'foo1' will be running - community.general.one_vm: - template_name: app_template - labels: - - foo1 - exact_count: 1 - count_labels: - - foo1 - -- name: Terminate all instances that have attribute foo - community.general.one_vm: - template_id: 53 - exact_count: 0 - count_attributes: - foo: - -- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'" - community.general.one_vm: - instance_ids: 351 - state: poweredoff - disk_saveas: - name: foo-image - -- name: "Save VM's disk with id=1 to the image with name 'bar-image'" - community.general.one_vm: - instance_ids: 351 - disk_saveas: - name: bar-image - disk_id: 1 -''' - -RETURN = ''' -instances_ids: - description: a list of instances ids whose state is changed or which are fetched with C(instance_ids) option. - type: list - returned: success - sample: [ 1234, 1235 ] -instances: - description: a list of instances info whose state is changed or which are fetched with C(instance_ids) option. 
- type: complex - returned: success - contains: - vm_id: - description: vm id - type: int - sample: 153 - vm_name: - description: vm name - type: str - sample: foo - template_id: - description: vm's template id - type: int - sample: 153 - group_id: - description: vm's group id - type: int - sample: 1 - group_name: - description: vm's group name - type: str - sample: one-users - owner_id: - description: vm's owner id - type: int - sample: 143 - owner_name: - description: vm's owner name - type: str - sample: app-user - mode: - description: vm's mode - type: str - returned: success - sample: 660 - state: - description: state of an instance - type: str - sample: ACTIVE - lcm_state: - description: lcm state of an instance that is only relevant when the state is ACTIVE - type: str - sample: RUNNING - cpu: - description: Percentage of CPU divided by 100 - type: float - sample: 0.2 - vcpu: - description: Number of CPUs (cores) - type: int - sample: 2 - memory: - description: The size of the memory in MB - type: str - sample: 4096 MB - disk_size: - description: The size of the disk in MB - type: str - sample: 20480 MB - networks: - description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC - type: list - sample: [ - { - "ip": "10.120.5.33", - "mac": "02:00:0a:78:05:21", - "name": "default-test-private", - "security_groups": "0,10" - }, - { - "ip": "10.120.5.34", - "mac": "02:00:0a:78:05:22", - "name": "default-test-private", - "security_groups": "0" - } - ] - uptime_h: - description: Uptime of the instance in hours - type: int - sample: 35 - labels: - description: A list of string labels that are associated with the instance - type: list - sample: [ - "foo", - "spec-label" - ] - attributes: - description: A dictionary of key/values attributes that are associated with the instance - type: dict - sample: { - "HYPERVISOR": "kvm", - "LOGO": "images/logos/centos.png", - "TE_GALAXY": "bar", - "USER_INPUTS": null - } -tagged_instances: - description: - - A list of instances info based on a specific attributes and/or - - labels that are specified with C(count_attributes) and C(count_labels) - - options. 
- type: complex - returned: success - contains: - vm_id: - description: vm id - type: int - sample: 153 - vm_name: - description: vm name - type: str - sample: foo - template_id: - description: vm's template id - type: int - sample: 153 - group_id: - description: vm's group id - type: int - sample: 1 - group_name: - description: vm's group name - type: str - sample: one-users - owner_id: - description: vm's user id - type: int - sample: 143 - owner_name: - description: vm's user name - type: str - sample: app-user - mode: - description: vm's mode - type: str - returned: success - sample: 660 - state: - description: state of an instance - type: str - sample: ACTIVE - lcm_state: - description: lcm state of an instance that is only relevant when the state is ACTIVE - type: str - sample: RUNNING - cpu: - description: Percentage of CPU divided by 100 - type: float - sample: 0.2 - vcpu: - description: Number of CPUs (cores) - type: int - sample: 2 - memory: - description: The size of the memory in MB - type: str - sample: 4096 MB - disk_size: - description: The size of the disk in MB - type: list - sample: [ - "20480 MB", - "10240 MB" - ] - networks: - description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC - type: list - sample: [ - { - "ip": "10.120.5.33", - "mac": "02:00:0a:78:05:21", - "name": "default-test-private", - "security_groups": "0,10" - }, - { - "ip": "10.120.5.34", - "mac": "02:00:0a:78:05:22", - "name": "default-test-private", - "security_groups": "0" - } - ] - uptime_h: - description: Uptime of the instance in hours - type: int - sample: 35 - labels: - description: A list of string labels that are associated with the instance - type: list - sample: [ - "foo", - "spec-label" - ] - attributes: - description: A dictionary of key/values attributes that are associated with the instance - type: dict - sample: { - "HYPERVISOR": "kvm", - "LOGO": "images/logos/centos.png", - "TE_GALAXY": "bar", - "USER_INPUTS": null - } -''' - -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_template(module, client, predicate): - - pool = client.templatepool.info(-2, -1, -1, -1) - # Filter -2 means fetch all templates user can Use - found = 0 - found_template = None - template_name = '' - - for template in pool.VMTEMPLATE: - if predicate(template): - found = found + 1 - found_template = template - template_name = template.NAME - - if found == 0: - return None - elif found > 1: - module.fail_json(msg='There are more templates with name: ' + template_name) - return found_template - - -def get_template_by_name(module, client, template_name): - return get_template(module, client, lambda template: (template.NAME == template_name)) - - -def get_template_by_id(module, client, template_id): - return get_template(module, client, lambda template: (template.ID == template_id)) - - -def get_template_id(module, client, requested_id, requested_name): - template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name) - if template: - return template.ID - else: - return None - - -def get_datastore(module, client, predicate): - pool = client.datastorepool.info() - found = 0 - found_datastore = None - datastore_name = '' - - for datastore in pool.DATASTORE: - if predicate(datastore): - found = found + 1 - found_datastore = datastore - datastore_name = datastore.NAME - - if found == 0: - return 
None - elif found > 1: - module.fail_json(msg='There are more datastores with name: ' + datastore_name) - return found_datastore - - -def get_datastore_by_name(module, client, datastore_name): - return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name)) - - -def get_datastore_by_id(module, client, datastore_id): - return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id)) - - -def get_datastore_id(module, client, requested_id, requested_name): - datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name) - if datastore: - return datastore.ID - else: - return None - - -def get_vm_by_id(client, vm_id): - try: - vm = client.vm.info(int(vm_id)) - except BaseException: - return None - return vm - - -def get_vms_by_ids(module, client, state, ids): - vms = [] - - for vm_id in ids: - vm = get_vm_by_id(client, vm_id) - if vm is None and state != 'absent': - module.fail_json(msg='There is no VM with id=' + str(vm_id)) - vms.append(vm) - - return vms - - -def get_vm_info(client, vm): - - vm = client.vm.info(vm.ID) - - networks_info = [] - - disk_size = [] - if 'DISK' in vm.TEMPLATE: - if isinstance(vm.TEMPLATE['DISK'], list): - for disk in vm.TEMPLATE['DISK']: - disk_size.append(disk['SIZE'] + ' MB') - else: - disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB') - - if 'NIC' in vm.TEMPLATE: - if isinstance(vm.TEMPLATE['NIC'], list): - for nic in vm.TEMPLATE['NIC']: - networks_info.append({ - 'ip': nic.get('IP', ''), - 'mac': nic.get('MAC', ''), - 'name': nic.get('NETWORK', ''), - 'security_groups': nic.get('SECURITY_GROUPS', '') - }) - else: - networks_info.append({ - 'ip': vm.TEMPLATE['NIC'].get('IP', ''), - 'mac': vm.TEMPLATE['NIC'].get('MAC', ''), - 'name': vm.TEMPLATE['NIC'].get('NETWORK', ''), - 'security_groups': - vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '') - }) - import time - - current_time = time.localtime() - vm_start_time = time.localtime(vm.STIME) - - vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time) - vm_uptime /= (60 * 60) - - permissions_str = parse_vm_permissions(client, vm) - - # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE - vm_lcm_state = None - if vm.STATE == VM_STATES.index('ACTIVE'): - vm_lcm_state = LCM_STATES[vm.LCM_STATE] - - vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID) - - info = { - 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']), - 'vm_id': vm.ID, - 'vm_name': vm.NAME, - 'state': VM_STATES[vm.STATE], - 'lcm_state': vm_lcm_state, - 'owner_name': vm.UNAME, - 'owner_id': vm.UID, - 'networks': networks_info, - 'disk_size': disk_size, - 'memory': vm.TEMPLATE['MEMORY'] + ' MB', - 'vcpu': vm.TEMPLATE['VCPU'], - 'cpu': vm.TEMPLATE['CPU'], - 'group_name': vm.GNAME, - 'group_id': vm.GID, - 'uptime_h': int(vm_uptime), - 'attributes': vm_attributes, - 'mode': permissions_str, - 'labels': vm_labels - } - - return info - - -def parse_vm_permissions(client, vm): - vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS - - owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A) - group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A) - other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A) - - permissions = str(owner_octal) + str(group_octal) + str(other_octal) - - return permissions - - -def set_vm_permissions(module, 
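                       # vms: list of pyone VM objects; permissions: an octal string such as '600'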
client, vms, permissions): - changed = False - - for vm in vms: - vm = client.vm.info(vm.ID) - old_permissions = parse_vm_permissions(client, vm) - changed = changed or old_permissions != permissions - - if not module.check_mode and old_permissions != permissions: - permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000 - mode_bits = [int(d) for d in permissions_str] - try: - client.vm.chmod( - vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8]) - except pyone.OneAuthorizationException: - module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.") - - return changed - - -def set_vm_ownership(module, client, vms, owner_id, group_id): - changed = False - - for vm in vms: - vm = client.vm.info(vm.ID) - if owner_id is None: - owner_id = vm.UID - if group_id is None: - group_id = vm.GID - - changed = changed or owner_id != vm.UID or group_id != vm.GID - - if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID): - try: - client.vm.chown(vm.ID, owner_id, group_id) - except pyone.OneAuthorizationException: - module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.") - - return changed - - -def get_size_in_MB(module, size_str): - - SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB'] - - s = size_str - init = size_str - num = "" - while s and s[0:1].isdigit() or s[0:1] == '.': - num += s[0] - s = s[1:] - num = float(num) - symbol = s.strip() - - if symbol not in SYMBOLS: - module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num)) - - prefix = {'B': 1} - - for i, s in enumerate(SYMBOLS[1:]): - prefix[s] = 1 << (i + 1) * 10 - - size_in_bytes = int(num * prefix[symbol]) - size_in_MB = size_in_bytes / (1024 * 1024) - - return size_in_MB - - -def create_disk_str(module, client, template_id, disk_size_list): - - if not disk_size_list: - return '' - - template = client.template.info(template_id) - if isinstance(template.TEMPLATE['DISK'], list): - # check if the number of disks is correct - if len(template.TEMPLATE['DISK']) != len(disk_size_list): - module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list))) - result = '' - index = 0 - for DISKS in template.TEMPLATE['DISK']: - disk = {} - diskresult = '' - # Get all info about existed disk e.g. IMAGE_ID,... - for key, value in DISKS.items(): - disk[key] = value - # copy disk attributes if it is not the size attribute - diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') - # Set the Disk Size - diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n' - result += diskresult - index += 1 - else: - if len(disk_size_list) > 1: - module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list))) - disk = {} - # Get all info about existed disk e.g. IMAGE_ID,... 
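-        # A hedged illustration (attribute names depend on the source template):
-        # the string assembled below looks like
-        #     DISK = [IMAGE_ID="10", SIZE=20480]
-        # where every attribute except SIZE is copied verbatim and SIZE is
-        # replaced by the requested size converted to MB.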
- for key, value in template.TEMPLATE['DISK'].items(): - disk[key] = value - # copy disk attributes if it is not the size attribute - result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') - # Set the Disk Size - result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n' - - return result - - -def create_attributes_str(attributes_dict, labels_list): - - attributes_str = '' - - if labels_list: - attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n' - if attributes_dict: - attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n' - - return attributes_str - - -def create_nics_str(network_attrs_list): - nics_str = '' - - for network in network_attrs_list: - # Packing key-value dict in string with format key="value", key="value" - network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items()) - nics_str = nics_str + 'NIC = [' + network_str + ']\n' - - return nics_str - - -def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent): - - if attributes_dict: - vm_name = attributes_dict.get('NAME', '') - - disk_str = create_disk_str(module, client, template_id, disk_size) - vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str - try: - vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent) - except pyone.OneException as e: - module.fail_json(msg=str(e)) - vm = get_vm_by_id(client, vm_id) - - return get_vm_info(client, vm) - - -def generate_next_index(vm_filled_indexes_list, num_sign_cnt): - counter = 0 - cnt_str = str(counter).zfill(num_sign_cnt) - - while cnt_str in vm_filled_indexes_list: - counter = counter + 1 - cnt_str = str(counter).zfill(num_sign_cnt) - - return cnt_str - - -def get_vm_labels_and_attributes_dict(client, vm_id): - vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE - - attrs_dict = {} - labels_list = [] - - for key, value in vm_USER_TEMPLATE.items(): - if key != 'LABELS': - attrs_dict[key] = value - else: - if key is not None: - labels_list = value.split(',') - - return labels_list, attrs_dict - - -def get_all_vms_by_attributes(client, attributes_dict, labels_list): - pool = client.vmpool.info(-2, -1, -1, -1).VM - vm_list = [] - name = '' - if attributes_dict: - name = attributes_dict.pop('NAME', '') - - if name != '': - base_name = name[:len(name) - name.count('#')] - # Check does the name have indexed format - with_hash = name.endswith('#') - - for vm in pool: - if vm.NAME.startswith(base_name): - if with_hash and vm.NAME[len(base_name):].isdigit(): - # If the name has indexed format and after base_name it has only digits it'll be matched - vm_list.append(vm) - elif not with_hash and vm.NAME == name: - # If the name is not indexed it has to be same - vm_list.append(vm) - pool = vm_list - - import copy - - vm_list = copy.copy(pool) - - for vm in pool: - remove_list = [] - vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID) - - if attributes_dict and len(attributes_dict) > 0: - for key, val in attributes_dict.items(): - if key in vm_attributes_dict: - if val and vm_attributes_dict[key] != val: - remove_list.append(vm) - break - else: - remove_list.append(vm) - break - vm_list = 
list(set(vm_list).difference(set(remove_list))) - - remove_list = [] - if labels_list and len(labels_list) > 0: - for label in labels_list: - if label not in vm_labels_list: - remove_list.append(vm) - break - vm_list = list(set(vm_list).difference(set(remove_list))) - - return vm_list - - -def create_count_of_vms( - module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent): - new_vms_list = [] - - vm_name = '' - if attributes_dict: - vm_name = attributes_dict.get('NAME', '') - - if module.check_mode: - return True, [], [] - - # Create list of used indexes - vm_filled_indexes_list = None - num_sign_cnt = vm_name.count('#') - if vm_name != '' and num_sign_cnt > 0: - vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None) - base_name = vm_name[:len(vm_name) - num_sign_cnt] - vm_name = base_name - # Make list which contains used indexes in format ['000', '001',...] - vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list) - - while count > 0: - new_vm_name = vm_name - # Create indexed name - if vm_filled_indexes_list is not None: - next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt) - vm_filled_indexes_list.append(next_index) - new_vm_name += next_index - # Update NAME value in the attributes in case there is index - attributes_dict['NAME'] = new_vm_name - new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent) - new_vm_id = new_vm_dict.get('vm_id') - new_vm = get_vm_by_id(client, new_vm_id) - new_vms_list.append(new_vm) - count -= 1 - - if vm_start_on_hold: - if wait: - for vm in new_vms_list: - wait_for_hold(module, client, vm, wait_timeout) - else: - if wait: - for vm in new_vms_list: - wait_for_running(module, client, vm, wait_timeout) - - return True, new_vms_list, [] - - -def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict, - labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent): - - vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) - - vm_count_diff = exact_count - len(vm_list) - changed = vm_count_diff != 0 - - new_vms_list = [] - instances_list = [] - tagged_instances_list = vm_list - - if module.check_mode: - return changed, instances_list, tagged_instances_list - - if vm_count_diff > 0: - # Add more VMs - changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, - labels_list, disk_size, network_attrs_list, wait, wait_timeout, - vm_start_on_hold, vm_persistent) - - tagged_instances_list += instances_list - elif vm_count_diff < 0: - # Delete surplus VMs - old_vms_list = [] - - while vm_count_diff < 0: - old_vm = vm_list.pop(0) - old_vms_list.append(old_vm) - terminate_vm(module, client, old_vm, hard) - vm_count_diff += 1 - - if wait: - for vm in old_vms_list: - wait_for_done(module, client, vm, wait_timeout) - - instances_list = old_vms_list - # store only the remaining instances - old_vms_set = set(old_vms_list) - tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set] - - return changed, instances_list, tagged_instances_list - - -VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE'] -LCM_STATES = 
['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP', - 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME', - 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF', - 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC', - 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY'] - - -def wait_for_state(module, client, vm, wait_timeout, state_predicate): - import time - start_time = time.time() - - while (time.time() - start_time) < wait_timeout: - vm = client.vm.info(vm.ID) - state = vm.STATE - lcm_state = vm.LCM_STATE - - if state_predicate(state, lcm_state): - return vm - elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'), - VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]: - module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state]) - - time.sleep(1) - - module.fail_json(msg="Wait timeout has expired!") - - -def wait_for_running(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, - lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')])) - - -def wait_for_done(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')])) - - -def wait_for_hold(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')])) - - -def wait_for_poweroff(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')])) - - -def terminate_vm(module, client, vm, hard=False): - changed = False - - if not vm: - return changed - - changed = True - - if not module.check_mode: - if hard: - client.vm.action('terminate-hard', vm.ID) - else: - client.vm.action('terminate', vm.ID) - - return changed - - -def terminate_vms(module, client, vms, hard): - changed = False - - for vm in vms: - changed = terminate_vm(module, client, vm, hard) or changed - - return changed - - -def poweroff_vm(module, client, vm, hard): - vm = client.vm.info(vm.ID) - changed = False - - lcm_state = vm.LCM_STATE - state = vm.STATE - - if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: - changed = True - - if changed and not module.check_mode: - if not hard: - client.vm.action('poweroff', vm.ID) - else: - client.vm.action('poweroff-hard', vm.ID) - - return changed - - -def poweroff_vms(module, client, vms, hard): - changed = False - - for vm in vms: - changed = poweroff_vm(module, client, vm, hard) or changed - - return changed - - -def reboot_vms(module, client, vms, wait_timeout, hard): - - if not module.check_mode: - # Firstly, power-off all instances - for vm in vms: - vm = client.vm.info(vm.ID) - lcm_state = vm.LCM_STATE - state = vm.STATE - if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: - poweroff_vm(module, client, vm, hard) - - # Wait for all to be power-off - for vm in vms: - wait_for_poweroff(module, client, vm, wait_timeout) - - for vm in vms: - resume_vm(module, client, vm) - - return True - - -def resume_vm(module, client, vm): 
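-    # Order of checks below: a VM on HOLD is released instead of resumed,
-    # SHUTDOWN_POWEROFF is rejected (the power-off must finish first), and any
-    # other non-RUNNING LCM state gets a single 'resume' action. Indices come
-    # from the VM_STATES/LCM_STATES tables above.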
-    vm = client.vm.info(vm.ID)
-    changed = False
-
-    state = vm.STATE
-    if state in [VM_STATES.index('HOLD')]:
-        changed = release_vm(module, client, vm)
-        return changed
-
-    lcm_state = vm.LCM_STATE
-    if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
-        module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
-                         "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shut down properly.")
-    if lcm_state not in [LCM_STATES.index('RUNNING')]:
-        changed = True
-
-    if changed and not module.check_mode:
-        client.vm.action('resume', vm.ID)
-
-    return changed
-
-
-def resume_vms(module, client, vms):
-    changed = False
-
-    for vm in vms:
-        changed = resume_vm(module, client, vm) or changed
-
-    return changed
-
-
-def release_vm(module, client, vm):
-    vm = client.vm.info(vm.ID)
-    changed = False
-
-    state = vm.STATE
-    if state != VM_STATES.index('HOLD'):
-        module.fail_json(msg="Cannot perform action 'release' because the VM is not in state 'HOLD'.")
-    else:
-        changed = True
-
-    if changed and not module.check_mode:
-        client.vm.action('release', vm.ID)
-
-    return changed
-
-
-def check_name_attribute(module, attributes):
-    if attributes.get("NAME"):
-        import re
-        if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
-            module.fail_json(msg="Illegal 'NAME' attribute: '" + attributes.get("NAME") +
-                             "'. Signs '#' are allowed only at the end of the name, and the name cannot consist of '#' only.")
-
-
-TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
-                                  "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
-                                  "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
-
-
-def check_attributes(module, attributes):
-    for key in attributes.keys():
-        if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
-            module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
-    # Check the format of the name attribute
-    check_name_attribute(module, attributes)
-
-
-def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
-    if not disk_saveas.get('name'):
-        module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
-
-    image_name = disk_saveas.get('name')
-    disk_id = disk_saveas.get('disk_id', 0)
-
-    if not module.check_mode:
-        if vm.STATE != VM_STATES.index('POWEROFF'):
-            module.fail_json(msg="The 'disk_saveas' option can be used only when the VM is in 'POWEROFF' state")
-        try:
-            client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
-        except pyone.OneException as e:
-            module.fail_json(msg=str(e))
-        wait_for_poweroff(module, client, vm, wait_timeout)  # wait for VM to leave the hotplug_saveas_poweroff state
-
-
-def get_connection_info(module):
-
-    url = module.params.get('api_url')
-    username = module.params.get('api_username')
-    password = module.params.get('api_password')
-
-    if not url:
-        url = os.environ.get('ONE_URL')
-
-    if not username:
-        username = os.environ.get('ONE_USERNAME')
-
-    if not password:
-        password = os.environ.get('ONE_PASSWORD')
-
-    if not username:
-        if not password:
-            authfile = os.environ.get('ONE_AUTH')
-            if authfile is None:
-                authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
-            try:
-                with open(authfile, "r") as fp:
-                    authstring = fp.read().rstrip()
-                username = authstring.split(":")[0]
-                password = authstring.split(":")[1]
-            except (OSError, IOError):
-                module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
-            except Exception:
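-                # Anything else here means the file was readable but malformed,
-                # e.g. it did not contain the expected "username:password" pair.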
-                module.fail_json(msg=("Error occurred while reading the ONE_AUTH file at '%s'" % authfile))
-    if not url:
-        module.fail_json(msg="OpenNebula API URL (api_url) is not specified")
-    from collections import namedtuple
-
-    auth_params = namedtuple('auth', ('url', 'username', 'password'))
-
-    return auth_params(url=url, username=username, password=password)
-
-
-def main():
-    fields = {
-        "api_url": {"required": False, "type": "str"},
-        "api_username": {"required": False, "type": "str"},
-        "api_password": {"required": False, "type": "str", "no_log": True},
-        "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"},
-        "template_name": {"required": False, "type": "str"},
-        "template_id": {"required": False, "type": "int"},
-        "vm_start_on_hold": {"default": False, "type": "bool"},
-        "state": {
-            "default": "present",
-            "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
-            "type": "str"
-        },
-        "mode": {"required": False, "type": "str"},
-        "owner_id": {"required": False, "type": "int"},
-        "group_id": {"required": False, "type": "int"},
-        "wait": {"default": True, "type": "bool"},
-        "wait_timeout": {"default": 300, "type": "int"},
-        "hard": {"default": False, "type": "bool"},
-        "memory": {"required": False, "type": "str"},
-        "cpu": {"required": False, "type": "float"},
-        "vcpu": {"required": False, "type": "int"},
-        "disk_size": {"required": False, "type": "list", "elements": "str"},
-        "datastore_name": {"required": False, "type": "str"},
-        "datastore_id": {"required": False, "type": "int"},
-        "networks": {"default": [], "type": "list", "elements": "dict"},
-        "count": {"default": 1, "type": "int"},
-        "exact_count": {"required": False, "type": "int"},
-        "attributes": {"default": {}, "type": "dict"},
-        "count_attributes": {"required": False, "type": "dict"},
-        "labels": {"default": [], "type": "list", "elements": "str"},
-        "count_labels": {"required": False, "type": "list", "elements": "str"},
-        "disk_saveas": {"type": "dict"},
-        "persistent": {"default": False, "type": "bool"}
-    }
-
-    module = AnsibleModule(argument_spec=fields,
-                           mutually_exclusive=[
-                               ['template_id', 'template_name', 'instance_ids'],
-                               ['template_id', 'template_name', 'disk_saveas'],
-                               ['instance_ids', 'count_attributes', 'count'],
-                               ['instance_ids', 'count_labels', 'count'],
-                               ['instance_ids', 'exact_count'],
-                               ['instance_ids', 'attributes'],
-                               ['instance_ids', 'labels'],
-                               ['disk_saveas', 'attributes'],
-                               ['disk_saveas', 'labels'],
-                               ['exact_count', 'count'],
-                               ['count', 'hard'],
-                               ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
-                               ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
-                               ['instance_ids', 'networks'],
-                               ['persistent', 'disk_size']
-                           ],
-                           supports_check_mode=True)
-
-    if not HAS_PYONE:
-        module.fail_json(msg='This module requires pyone to work!')
-
-    auth = get_connection_info(module)
-    params = module.params
-    instance_ids = params.get('instance_ids')
-    requested_template_name = params.get('template_name')
-    requested_template_id = params.get('template_id')
-    put_vm_on_hold = params.get('vm_start_on_hold')
-    state = params.get('state')
-    permissions = params.get('mode')
-    owner_id = params.get('owner_id')
-    group_id = params.get('group_id')
-    wait = params.get('wait')
-    wait_timeout = params.get('wait_timeout')
-    hard = params.get('hard')
-    memory = params.get('memory')
-    cpu = params.get('cpu')
-    vcpu = params.get('vcpu')
-    disk_size = params.get('disk_size')
-    requested_datastore_id = params.get('datastore_id')
-    requested_datastore_name = params.get('datastore_name')
-    networks = params.get('networks')
-    count = params.get('count')
-    exact_count = params.get('exact_count')
-    attributes = params.get('attributes')
-    count_attributes = params.get('count_attributes')
-    labels = params.get('labels')
-    count_labels = params.get('count_labels')
-    disk_saveas = params.get('disk_saveas')
-    persistent = params.get('persistent')
-
-    if not (auth.username and auth.password):
-        module.warn("Credentials missing")
-    else:
-        one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
-
-    if attributes:
-        attributes = dict((key.upper(), value) for key, value in attributes.items())
-        check_attributes(module, attributes)
-
-    if count_attributes:
-        count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
-        if not attributes:
-            import copy
-            module.warn('When `count_attributes` is passed without `attributes` during deployment, `attributes` implicitly takes the same values.')
-            attributes = copy.copy(count_attributes)
-        check_attributes(module, count_attributes)
-
-    if count_labels and not labels:
-        module.warn('When `count_labels` is passed without `labels` during deployment, `labels` implicitly takes the same values.')
-        labels = count_labels
-
-    # Fetch template
-    template_id = None
-    if requested_template_id is not None or requested_template_name:
-        template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
-        if template_id is None:
-            if requested_template_id is not None:
-                module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
-            elif requested_template_name:
-                module.fail_json(msg="There is no template with name: " + requested_template_name)
-
-    # Fetch datastore
-    datastore_id = None
-    if requested_datastore_id or requested_datastore_name:
-        datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
-        if datastore_id is None:
-            if requested_datastore_id:
-                module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
-            elif requested_datastore_name:
-                module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
-        else:
-            attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
-
-    if exact_count and template_id is None:
-        module.fail_json(msg='Option `exact_count` requires `template_id` or `template_name`')
-
-    if exact_count is not None and not (count_attributes or count_labels):
-        module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
-    if (count_attributes or count_labels) and exact_count is None:
-        module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
-    if template_id is not None and state != 'present':
-        module.fail_json(msg="Only state 'present' is valid for the template")
-
-    if memory:
-        attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
-    if cpu:
-        attributes['CPU'] = str(cpu)
-    if vcpu:
-        attributes['VCPU'] = str(vcpu)
-
-    if exact_count is not None and state != 'present':
-        module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
-    if exact_count is not None and exact_count < 0:
-        module.fail_json(msg='`exact_count` cannot be less than 0')
-    if count <= 0:
-        module.fail_json(msg='`count` has to be greater than 0')
-
-    if permissions is not None:
-        import re
-        if re.match("^[0-7]{3}$", permissions) is None:
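-            # `mode` mirrors a UNIX permission triplet (owner/group/other),
-            # one octal digit each, e.g. "660" - anything else is rejected.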
-            module.fail_json(msg="Option `mode` has to have exactly 3 digits in octal format, e.g. 600")
-
-    if exact_count is not None:
-        # Deploy an exact count of VMs
-        changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
-                                                                                   count_attributes, labels, count_labels, disk_size,
-                                                                                   networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
-        vms = tagged_instances_list
-    elif template_id is not None and state == 'present':
-        # Deploy count VMs
-        changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
-                                                                             attributes, labels, disk_size, networks, wait, wait_timeout,
-                                                                             put_vm_on_hold, persistent)
-        # instances_list - new instances
-        # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
-        vms = instances_list
-    else:
-        # Fetch data of instances, or change their state
-        if not (instance_ids or attributes or labels):
-            module.fail_json(msg="At least one of `instance_ids`, `attributes` or `labels` must be passed!")
-
-        if memory or cpu or vcpu or disk_size or networks:
-            module.fail_json(msg="The parameters `memory`, `cpu`, `vcpu`, `disk_size` and `networks` can only be set when deploying a VM!")
-
-        if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
-            module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
-
-        vms = []
-        tagged = False
-        changed = False
-
-        if instance_ids:
-            vms = get_vms_by_ids(module, one_client, state, instance_ids)
-        else:
-            tagged = True
-            vms = get_all_vms_by_attributes(one_client, attributes, labels)
-
-        if len(vms) == 0 and state != 'absent' and state != 'present':
-            module.fail_json(msg='There are no instances with the specified `instance_ids`, `attributes` and/or `labels`')
-
-        if len(vms) == 0 and state == 'present' and not tagged:
-            module.fail_json(msg='There are no instances with the specified `instance_ids`.')
-
-        if tagged and state == 'absent':
-            module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
-
-        if state == 'absent':
-            changed = terminate_vms(module, one_client, vms, hard)
-        elif state == 'rebooted':
-            changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
-        elif state == 'poweredoff':
-            changed = poweroff_vms(module, one_client, vms, hard)
-        elif state == 'running':
-            changed = resume_vms(module, one_client, vms)
-
-        instances_list = vms
-        tagged_instances_list = []
-
-    if permissions is not None:
-        changed = set_vm_permissions(module, one_client, vms, permissions) or changed
-
-    if owner_id is not None or group_id is not None:
-        changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
-
-    if wait and not module.check_mode and state != 'present':
-        wait_for = {
-            'absent': wait_for_done,
-            'rebooted': wait_for_running,
-            'poweredoff': wait_for_poweroff,
-            'running': wait_for_running
-        }
-        for vm in vms:
-            if vm is not None:
-                wait_for[state](module, one_client, vm, wait_timeout)
-
-    if disk_saveas is not None:
-        if len(vms) == 0:
-            module.fail_json(msg="There is no VM whose disk will be saved.")
-        disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
-        changed = True
-
-    # instances - a list of info for the instances whose state changed or which were fetched with the C(instance_ids) option
-    instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
-    instances_ids = list(vm.ID for vm in instances_list
if vm is not None) - # tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels) - tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None) - - result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances} - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py b/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py deleted file mode 100644 index a82914bd..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017, 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: oci_vcn -short_description: Manage Virtual Cloud Networks(VCN) in OCI -description: - - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. - The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from - U(https://github.com/oracle/oci-ansible-modules/releases). -options: - cidr_block: - description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present). - type: str - required: false - compartment_id: - description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present). - This option is mutually exclusive with I(vcn_id). - type: str - display_name: - description: A user-friendly name. Does not have to be unique, and it's changeable. - type: str - aliases: [ 'name' ] - dns_label: - description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to - form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example, - bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice - to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins - with a letter. The value cannot be changed. - type: str - state: - description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN. - type: str - default: present - choices: ['present', 'absent'] - vcn_id: - description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN - with I(state=present). This option is mutually exclusive with I(compartment_id). 
- type: str - aliases: [ 'id' ] -author: "Rohit Chaware (@rohitChaware)" -extends_documentation_fragment: -- community.general.oracle -- community.general.oracle_creatable_resource -- community.general.oracle_wait_options -- community.general.oracle_tags - -''' - -EXAMPLES = """ -- name: Create a VCN - community.general.oci_vcn: - cidr_block: '10.0.0.0/16' - compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx' - display_name: my_vcn - dns_label: ansiblevcn - -- name: Updates the specified VCN's display name - community.general.oci_vcn: - vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx - display_name: ansible_vcn - -- name: Delete the specified VCN - community.general.oci_vcn: - vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx - state: absent -""" - -RETURN = """ -vcn: - description: Information about the VCN - returned: On successful create and update operation - type: dict - sample: { - "cidr_block": "10.0.0.0/16", - compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx", - "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx", - "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx", - "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx", - "display_name": "ansible_vcn", - "dns_label": "ansiblevcn", - "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx", - "lifecycle_state": "AVAILABLE", - "time_created": "2017-11-13T20:22:40.626000+00:00", - "vcn_domain_name": "ansiblevcn.oraclevcn.com" - } -""" - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils - -try: - from oci.core.virtual_network_client import VirtualNetworkClient - from oci.core.models import CreateVcnDetails - from oci.core.models import UpdateVcnDetails - - HAS_OCI_PY_SDK = True -except ImportError: - HAS_OCI_PY_SDK = False - - -def delete_vcn(virtual_network_client, module): - result = oci_utils.delete_and_wait( - resource_type="vcn", - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - kwargs_get={"vcn_id": module.params["vcn_id"]}, - delete_fn=virtual_network_client.delete_vcn, - kwargs_delete={"vcn_id": module.params["vcn_id"]}, - module=module, - ) - return result - - -def update_vcn(virtual_network_client, module): - result = oci_utils.check_and_update_resource( - resource_type="vcn", - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - kwargs_get={"vcn_id": module.params["vcn_id"]}, - update_fn=virtual_network_client.update_vcn, - primitive_params_update=["vcn_id"], - kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"}, - module=module, - update_attributes=list(UpdateVcnDetails().attribute_map.keys()), - ) - return result - - -def create_vcn(virtual_network_client, module): - create_vcn_details = CreateVcnDetails() - for attribute in create_vcn_details.attribute_map.keys(): - if attribute in module.params: - setattr(create_vcn_details, attribute, module.params[attribute]) - - result = oci_utils.create_and_wait( - resource_type="vcn", - create_fn=virtual_network_client.create_vcn, - kwargs_create={"create_vcn_details": create_vcn_details}, - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - get_param="vcn_id", - module=module, - ) - return result - - -def main(): - module_args = oci_utils.get_taggable_arg_spec( - supports_create=True, supports_wait=True - ) - module_args.update( - dict( - cidr_block=dict(type="str", required=False), - compartment_id=dict(type="str", 
required=False), - display_name=dict(type="str", required=False, aliases=["name"]), - dns_label=dict(type="str", required=False), - state=dict( - type="str", - required=False, - default="present", - choices=["absent", "present"], - ), - vcn_id=dict(type="str", required=False, aliases=["id"]), - ) - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=False, - mutually_exclusive=[["compartment_id", "vcn_id"]], - ) - - if not HAS_OCI_PY_SDK: - module.fail_json(msg=missing_required_lib("oci")) - - virtual_network_client = oci_utils.create_service_client( - module, VirtualNetworkClient - ) - - exclude_attributes = {"display_name": True, "dns_label": True} - state = module.params["state"] - vcn_id = module.params["vcn_id"] - - if state == "absent": - if vcn_id is not None: - result = delete_vcn(virtual_network_client, module) - else: - module.fail_json( - msg="Specify vcn_id with state as 'absent' to delete a VCN." - ) - - else: - if vcn_id is not None: - result = update_vcn(virtual_network_client, module) - else: - result = oci_utils.check_and_create_resource( - resource_type="vcn", - create_fn=create_vcn, - kwargs_create={ - "virtual_network_client": virtual_network_client, - "module": module, - }, - list_fn=virtual_network_client.list_vcns, - kwargs_list={"compartment_id": module.params["compartment_id"]}, - module=module, - model=CreateVcnDetails(), - exclude_attributes=exclude_attributes, - ) - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py b/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py deleted file mode 100644 index 26179eb8..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ovh_ip_failover -short_description: Manage OVH IP failover address -description: - - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move - an ip failover (or failover block) between services -author: "Pascal HERAUD (@pascalheraud)" -notes: - - Uses the python OVH Api U(https://github.com/ovh/python-ovh). - You have to create an application (a key and secret) with a consummer - key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/) -requirements: - - ovh >= 0.4.8 -options: - name: - required: true - description: - - The IP address to manage (can be a single IP like 1.1.1.1 - or a block like 1.1.1.1/28 ) - type: str - service: - required: true - description: - - The name of the OVH service this IP address should be routed - type: str - endpoint: - required: true - description: - - The endpoint to use ( for instance ovh-eu) - type: str - wait_completion: - required: false - default: true - type: bool - description: - - If true, the module will wait for the IP address to be moved. - If false, exit without waiting. The taskId will be returned - in module output - wait_task_completion: - required: false - default: 0 - description: - - If not 0, the module will wait for this task id to be - completed. 
Use wait_task_completion if you want to wait for - completion of a previously executed task with - wait_completion=false. You can execute this module repeatedly on - a list of failover IPs using wait_completion=false (see examples) - type: int - application_key: - required: true - description: - - The applicationKey to use - type: str - application_secret: - required: true - description: - - The application secret to use - type: str - consumer_key: - required: true - description: - - The consumer key to use - type: str - timeout: - required: false - default: 120 - description: - - The timeout in seconds used to wait for a task to be - completed. Default is 120 seconds. - type: int - -''' - -EXAMPLES = ''' -# Route an IP address 1.1.1.1 to the service ns666.ovh.net -- community.general.ovh_ip_failover: - name: 1.1.1.1 - service: ns666.ovh.net - endpoint: ovh-eu - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey -- community.general.ovh_ip_failover: - name: 1.1.1.1 - service: ns666.ovh.net - endpoint: ovh-eu - wait_completion: false - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey - register: moved -- community.general.ovh_ip_failover: - name: 1.1.1.1 - service: ns666.ovh.net - endpoint: ovh-eu - wait_task_completion: "{{moved.taskId}}" - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey -''' - -RETURN = ''' -''' - -import time - -try: - import ovh - import ovh.exceptions - from ovh.exceptions import APIError - HAS_OVH = True -except ImportError: - HAS_OVH = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import quote_plus - - -def getOvhClient(ansibleModule): - endpoint = ansibleModule.params.get('endpoint') - application_key = ansibleModule.params.get('application_key') - application_secret = ansibleModule.params.get('application_secret') - consumer_key = ansibleModule.params.get('consumer_key') - - return ovh.Client( - endpoint=endpoint, - application_key=application_key, - application_secret=application_secret, - consumer_key=consumer_key - ) - - -def waitForNoTask(client, name, timeout): - currentTimeout = timeout - while client.get('/ip/{0}/task'.format(quote_plus(name)), - function='genericMoveFloatingIp', - status='todo'): - time.sleep(1) # Delay for 1 sec - currentTimeout -= 1 - if currentTimeout < 0: - return False - return True - - -def waitForTaskDone(client, name, taskId, timeout): - currentTimeout = timeout - while True: - task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId)) - if task['status'] == 'done': - return True - time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API - currentTimeout -= 5 - if currentTimeout < 0: - return False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - service=dict(required=True), - endpoint=dict(required=True), - wait_completion=dict(default=True, type='bool'), - wait_task_completion=dict(default=0, type='int'), - application_key=dict(required=True, no_log=True), - application_secret=dict(required=True, no_log=True), - consumer_key=dict(required=True, no_log=True), - timeout=dict(default=120, type='int') - ), - supports_check_mode=True - ) - - result = dict( - changed=False - ) - - if not HAS_OVH: - module.fail_json(msg='ovh-api python module is required to run this module ') - - # Get parameters - name = module.params.get('name') - service = 
module.params.get('service') - timeout = module.params.get('timeout') - wait_completion = module.params.get('wait_completion') - wait_task_completion = module.params.get('wait_task_completion') - - # Connect to OVH API - client = getOvhClient(module) - - # Check that the load balancing exists - try: - ips = client.get('/ip', ip=name, type='failover') - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of ips, ' - 'check application key, secret, consumerkey and parameters. ' - 'Error returned by OVH api was : {0}'.format(apiError)) - - if name not in ips and '{0}/32'.format(name) not in ips: - module.fail_json(msg='IP {0} does not exist'.format(name)) - - # Check that no task is pending before going on - try: - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for no pending ' - 'tasks before executing the module '.format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of pending tasks ' - 'of the ip, check application key, secret, consumerkey ' - 'and parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - try: - ipproperties = client.get('/ip/{0}'.format(quote_plus(name))) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the properties ' - 'of the ip, check application key, secret, consumerkey ' - 'and parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - if ipproperties['routedTo']['serviceName'] != service: - if not module.check_mode: - if wait_task_completion == 0: - # Move the IP and get the created taskId - task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service) - taskId = task['taskId'] - result['moved'] = True - else: - # Just wait for the given taskId to be completed - taskId = wait_task_completion - result['moved'] = False - result['taskId'] = taskId - if wait_completion or wait_task_completion != 0: - if not waitForTaskDone(client, name, taskId, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for completion ' - 'of move ip to service'.format(timeout)) - result['waited'] = True - else: - result['waited'] = False - result['changed'] = True - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py deleted file mode 100644 index 28d6f3a1..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ovh_ip_loadbalancing_backend -short_description: Manage OVH IP LoadBalancing backends -description: - - Manage OVH (French European hosting provider) LoadBalancing IP backends -author: Pascal Heraud (@pascalheraud) -notes: - - Uses the python OVH Api U(https://github.com/ovh/python-ovh). 
- You have to create an application (a key and secret) with a consumer - key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/) -requirements: - - ovh > 0.3.5 -options: - name: - required: true - description: - - Name of the LoadBalancing internal name (ip-X.X.X.X) - type: str - backend: - required: true - description: - - The IP address of the backend to update / modify / delete - type: str - state: - default: present - choices: ['present', 'absent'] - description: - - Determines whether the backend is to be created/modified - or deleted - type: str - probe: - default: 'none' - choices: ['none', 'http', 'icmp' , 'oco'] - description: - - Determines the type of probe to use for this backend - type: str - weight: - default: 8 - description: - - Determines the weight for this backend - type: int - endpoint: - required: true - description: - - The endpoint to use ( for instance ovh-eu) - type: str - application_key: - required: true - description: - - The applicationKey to use - type: str - application_secret: - required: true - description: - - The application secret to use - type: str - consumer_key: - required: true - description: - - The consumer key to use - type: str - timeout: - default: 120 - description: - - The timeout in seconds used to wait for a task to be - completed. - type: int - -''' - -EXAMPLES = ''' -- name: Adds or modify the backend '212.1.1.1' to a loadbalancing 'ip-1.1.1.1' - ovh_ip_loadbalancing: - name: ip-1.1.1.1 - backend: 212.1.1.1 - state: present - probe: none - weight: 8 - endpoint: ovh-eu - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey - -- name: Removes a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1' - ovh_ip_loadbalancing: - name: ip-1.1.1.1 - backend: 212.1.1.1 - state: absent - endpoint: ovh-eu - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey -''' - -RETURN = ''' -''' - -import time - -try: - import ovh - import ovh.exceptions - from ovh.exceptions import APIError - HAS_OVH = True -except ImportError: - HAS_OVH = False - -from ansible.module_utils.basic import AnsibleModule - - -def getOvhClient(ansibleModule): - endpoint = ansibleModule.params.get('endpoint') - application_key = ansibleModule.params.get('application_key') - application_secret = ansibleModule.params.get('application_secret') - consumer_key = ansibleModule.params.get('consumer_key') - - return ovh.Client( - endpoint=endpoint, - application_key=application_key, - application_secret=application_secret, - consumer_key=consumer_key - ) - - -def waitForNoTask(client, name, timeout): - currentTimeout = timeout - while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0: - time.sleep(1) # Delay for 1 sec - currentTimeout -= 1 - if currentTimeout < 0: - return False - return True - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - backend=dict(required=True), - weight=dict(default=8, type='int'), - probe=dict(default='none', - choices=['none', 'http', 'icmp', 'oco']), - state=dict(default='present', choices=['present', 'absent']), - endpoint=dict(required=True), - application_key=dict(required=True, no_log=True), - application_secret=dict(required=True, no_log=True), - consumer_key=dict(required=True, no_log=True), - timeout=dict(default=120, type='int') - ) - ) - - if not HAS_OVH: - module.fail_json(msg='ovh-api python module' - 'is required to run this module ') - - # Get parameters - name = module.params.get('name') - 
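-    # Rough flow from here: verify the load balancer exists, drain any pending
-    # tasks, then reconcile the requested backend against what the API reports:
-    #     GET    /ip/loadBalancing/{name}/backend             existing backends
-    #     DELETE /ip/loadBalancing/{name}/backend/{backend}   state=absent
-    #     POST   .../backend/{backend}/setWeight              weight changed
-    #     PUT    .../backend/{backend}                        probe changed
-    #     POST   /ip/loadBalancing/{name}/backend             create backend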
state = module.params.get('state') - backend = module.params.get('backend') - weight = module.params.get('weight') - probe = module.params.get('probe') - timeout = module.params.get('timeout') - - # Connect to OVH API - client = getOvhClient(module) - - # Check that the load balancing exists - try: - loadBalancings = client.get('/ip/loadBalancing') - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of loadBalancing, ' - 'check application key, secret, consumerkey and parameters. ' - 'Error returned by OVH api was : {0}'.format(apiError)) - - if name not in loadBalancings: - module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name)) - - # Check that no task is pending before going on - try: - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for no pending ' - 'tasks before executing the module '.format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of pending tasks ' - 'of the loadBalancing, check application key, secret, consumerkey ' - 'and parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - try: - backends = client.get('/ip/loadBalancing/{0}/backend'.format(name)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the list of backends ' - 'of the loadBalancing, check application key, secret, consumerkey ' - 'and parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - backendExists = backend in backends - moduleChanged = False - if state == "absent": - if backendExists: - # Remove backend - try: - client.delete( - '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for completion ' - 'of removing backend task'.format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for deleting the backend, ' - 'check application key, secret, consumerkey and ' - 'parameters. Error returned by OVH api was : {0}' - .format(apiError)) - moduleChanged = True - else: - if backendExists: - # Get properties - try: - backendProperties = client.get( - '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for getting the backend properties, ' - 'check application key, secret, consumerkey and ' - 'parameters. Error returned by OVH api was : {0}' - .format(apiError)) - - if (backendProperties['weight'] != weight): - # Change weight - try: - client.post( - '/ip/loadBalancing/{0}/backend/{1}/setWeight' - .format(name, backend), weight=weight) - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for completion ' - 'of setWeight to backend task' - .format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for updating the weight of the ' - 'backend, check application key, secret, consumerkey ' - 'and parameters. 
Error returned by OVH api was : {0}' - .format(apiError)) - moduleChanged = True - - if (backendProperties['probe'] != probe): - # Change probe - backendProperties['probe'] = probe - try: - client.put( - '/ip/loadBalancing/{0}/backend/{1}' - .format(name, backend), probe=probe) - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for completion of ' - 'setProbe to backend task' - .format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for updating the probe of ' - 'the backend, check application key, secret, ' - 'consumerkey and parameters. Error returned by OVH api ' - 'was : {0}' - .format(apiError)) - moduleChanged = True - - else: - # Creates backend - try: - try: - client.post('/ip/loadBalancing/{0}/backend'.format(name), - ipBackend=backend, probe=probe, weight=weight) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for creating the backend, check ' - 'application key, secret, consumerkey and parameters. ' - 'Error returned by OVH api was : {0}' - .format(apiError)) - - if not waitForNoTask(client, name, timeout): - module.fail_json( - msg='Timeout of {0} seconds while waiting for completion of ' - 'backend creation task'.format(timeout)) - except APIError as apiError: - module.fail_json( - msg='Unable to call OVH api for creating the backend, check ' - 'application key, secret, consumerkey and parameters. ' - 'Error returned by OVH api was : {0}'.format(apiError)) - moduleChanged = True - - module.exit_json(changed=moduleChanged) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py b/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py deleted file mode 100644 index 75c70a79..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py +++ /dev/null @@ -1,157 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Francois Lallart (@fraff) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovh_monthly_billing -author: Francois Lallart (@fraff) -version_added: '0.2.0' -short_description: Manage OVH monthly billing -description: - - Enable monthly billing on OVH cloud intances (be aware OVH does not allow to disable it). 
-requirements: [ "ovh" ] -options: - project_id: - required: true - type: str - description: - - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET) - instance_id: - required: true - type: str - description: - - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET) - endpoint: - type: str - description: - - The endpoint to use (for instance ovh-eu) - application_key: - type: str - description: - - The applicationKey to use - application_secret: - type: str - description: - - The application secret to use - consumer_key: - type: str - description: - - The consumer key to use -''' - -EXAMPLES = ''' -- name: Basic usage, using auth from /etc/ovh.conf - community.general.ovh_monthly_billing: - project_id: 0c727a20aa144485b70c44dee9123b46 - instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948 - -# Get openstack cloud ID and instance ID, OVH use them in its API -- name: Get openstack cloud ID and instance ID - os_server_info: - cloud: myProjectName - region_name: myRegionName - server: myServerName - register: openstack_servers - -- name: Use IDs - community.general.ovh_monthly_billing: - project_id: "{{ openstack_servers.0.tenant_id }}" - instance_id: "{{ openstack_servers.0.id }}" - application_key: yourkey - application_secret: yoursecret - consumer_key: yourconsumerkey -''' - -RETURN = ''' -''' - -import os -import sys -import traceback - -try: - import ovh - import ovh.exceptions - from ovh.exceptions import APIError - HAS_OVH = True -except ImportError: - HAS_OVH = False - OVH_IMPORT_ERROR = traceback.format_exc() - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - project_id=dict(required=True), - instance_id=dict(required=True), - endpoint=dict(required=False), - application_key=dict(required=False, no_log=True), - application_secret=dict(required=False, no_log=True), - consumer_key=dict(required=False, no_log=True), - ), - supports_check_mode=True - ) - - # Get parameters - project_id = module.params.get('project_id') - instance_id = module.params.get('instance_id') - endpoint = module.params.get('endpoint') - application_key = module.params.get('application_key') - application_secret = module.params.get('application_secret') - consumer_key = module.params.get('consumer_key') - project = "" - instance = "" - ovh_billing_status = "" - - if not HAS_OVH: - module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh') - - # Connect to OVH API - client = ovh.Client( - endpoint=endpoint, - application_key=application_key, - application_secret=application_secret, - consumer_key=consumer_key - ) - - # Check that the instance exists - try: - project = client.get('/cloud/project/{0}'.format(project_id)) - except ovh.exceptions.ResourceNotFoundError: - module.fail_json(msg='project {0} does not exist'.format(project_id)) - - # Check that the instance exists - try: - instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id)) - except ovh.exceptions.ResourceNotFoundError: - module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id)) - - # Is monthlyBilling already enabled or pending ? 
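-    # Monthly billing is a one-way switch; the only mutation is
-    #     POST /cloud/project/{project_id}/instance/{instance_id}/activeMonthlyBilling
-    # so a status of 'ok' or 'activationPending' is treated as "already done"
-    # and the module exits unchanged, which is what makes it idempotent.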
- if instance['monthlyBilling'] is not None: - if instance['monthlyBilling']['status'] in ['ok', 'activationPending']: - module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling']) - - if module.check_mode: - module.exit_json(changed=True, msg="Dry Run!") - - try: - ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id)) - module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling']) - except APIError as apiError: - module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError)) - - # We should never reach here - module.fail_json(msg='Internal ovh_monthly_billing module error') - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py deleted file mode 100644 index 5912a6f4..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py +++ /dev/null @@ -1,670 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Tomas Karasek -# (c) 2016, Matt Baldwin -# (c) 2016, Thibaud Morel l'Horset -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: packet_device - -short_description: Manage a bare metal server in the Packet Host. - -description: - - Manage a bare metal server in the Packet Host (a "device" in the API terms). - - When the machine is created it can optionally wait for public IP address, or for active state. - - This module has a dependency on packet >= 1.0. - - API is documented at U(https://www.packet.net/developers/api/devices). - - -author: - - Tomas Karasek (@t0mk) - - Matt Baldwin (@baldwinSPC) - - Thibaud Morel l'Horset (@teebes) - -options: - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - count: - description: - - The number of devices to create. Count number can be included in hostname via the %d string formatter. - default: 1 - type: int - - count_offset: - description: - - From which number to start the count. - default: 1 - type: int - - device_ids: - description: - - List of device IDs on which to operate. - type: list - elements: str - - tags: - description: - - List of device tags. - - Currently implemented only for device creation. - type: list - elements: str - version_added: '0.2.0' - - facility: - description: - - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/). - type: str - - features: - description: - - Dict with "features" for device creation. See Packet API docs for details. - type: dict - - hostnames: - description: - - A hostname of a device, or a list of hostnames. - - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count). - - If only one hostname, it might be expanded to list if I(count)>1. - aliases: [name] - type: list - elements: str - - locked: - description: - - Whether to lock a created device. - default: false - aliases: [lock] - type: bool - - operating_system: - description: - - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/). 
- type: str - - plan: - description: - - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/). - type: str - - project_id: - description: - - ID of project of the device. - required: true - type: str - - state: - description: - - Desired state of the device. - - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns. - - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout). - choices: [present, absent, active, inactive, rebooted] - default: present - type: str - - user_data: - description: - - Userdata blob made available to the machine - type: str - - wait_for_public_IPv: - description: - - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. - - If set to 4, it will wait until IPv4 is assigned to the instance. - - If set to 6, wait until public IPv6 is assigned to the instance. - choices: [4,6] - type: int - - wait_timeout: - description: - - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state). - - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice. - default: 900 - type: int - - ipxe_script_url: - description: - - URL of custom iPXE script for provisioning. - - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe). - type: str - - always_pxe: - description: - - Persist PXE as the first boot option. - - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE. - default: false - type: bool - - -requirements: - - "packet-python >= 1.35" - -notes: - - Doesn't support check mode. - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass it to the auth_token parameter of the module instead. - -# Creating devices - -- name: Create 1 device - hosts: localhost - tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - tags: ci-xyz - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - -# Create the same device and wait until it is in state "active", (when it's -# ready for other API operations). Fail if the device is not "active" in -# 10 minutes. 
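# (Editor's note: per the state and wait_for_public_IPv docs above, state:
# active polls the Packet API until every requested device reports "active",
# and when an IP wait is also requested the two waits run one after the
# other, so wait_timeout can effectively apply twice.)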
- -- name: Create device and wait up to 10 minutes for active state - hosts: localhost - tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active - wait_timeout: 600 - -- name: Create 3 ubuntu devices called server-01, server-02 and server-03 - hosts: localhost - tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: server-%02d - count: 3 - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - -- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH - hosts: localhost - tasks: - - name: Create 3 devices and register their facts - community.general.packet_device: - hostnames: [coreos-one, coreos-two, coreos-three] - operating_system: coreos_stable - plan: baremetal_0 - facility: ewr1 - locked: true - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - wait_for_public_IPv: 4 - user_data: | - #cloud-config - ssh_authorized_keys: - - {{ lookup('file', 'my_packet_sshkey') }} - coreos: - etcd: - discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 - addr: $private_ipv4:4001 - peer-addr: $private_ipv4:7001 - fleet: - public-ip: $private_ipv4 - units: - - name: etcd.service - command: start - - name: fleet.service - command: start - register: newhosts - - - name: Wait for ssh - ansible.builtin.wait_for: - delay: 1 - host: "{{ item.public_ipv4 }}" - port: 22 - state: started - timeout: 500 - with_items: "{{ newhosts.devices }}" - - -# Other states of devices - -- name: Remove 3 devices by uuid - hosts: localhost - tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - state: absent - device_ids: - - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 - - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 - - 6bb4faf8-a638-4ac7-8f47-86fe514c301f -''' - -RETURN = ''' -changed: - description: True if a device was altered in any way (created, modified or removed) - type: bool - sample: True - returned: success - -devices: - description: Information about each device that was processed - type: list - sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", - "public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12", - "tags": [], "locked": false, "state": "provisioning", - "public_ipv6": ""2604:1380:2:5200::3"}]' - returned: success -''' # NOQA - - -import os -import re -import time -import uuid -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') -HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) -MAX_DEVICES = 100 - -PACKET_DEVICE_STATES = ( - 'queued', - 'provisioning', - 'failed', - 'powering_on', - 'active', - 'powering_off', - 'inactive', - 'rebooting', -) - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - - -ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present'] - - -def serialize_device(device): - """ - Standard representation for a device as returned by various tasks:: - - { - 'id': 'device_id' - 'hostname': 'device_hostname', - 'tags': [], - 'locked': false, - 'state': 'provisioning', - 'ip_addresses': [ - { - "address": "147.75.194.227", - "address_family": 4, - 
"public": true - }, - { - "address": "2604:1380:2:5200::3", - "address_family": 6, - "public": true - }, - { - "address": "10.100.11.129", - "address_family": 4, - "public": false - } - ], - "private_ipv4": "10.100.11.129", - "public_ipv4": "147.75.194.227", - "public_ipv6": "2604:1380:2:5200::3", - } - - """ - device_data = {} - device_data['id'] = device.id - device_data['hostname'] = device.hostname - device_data['tags'] = device.tags - device_data['locked'] = device.locked - device_data['state'] = device.state - device_data['ip_addresses'] = [ - { - 'address': addr_data['address'], - 'address_family': addr_data['address_family'], - 'public': addr_data['public'], - } - for addr_data in device.ip_addresses - ] - # Also include each IPs as a key for easier lookup in roles. - # Key names: - # - public_ipv4 - # - public_ipv6 - # - private_ipv4 - # - private_ipv6 (if there is one) - for ipdata in device_data['ip_addresses']: - if ipdata['public']: - if ipdata['address_family'] == 6: - device_data['public_ipv6'] = ipdata['address'] - elif ipdata['address_family'] == 4: - device_data['public_ipv4'] = ipdata['address'] - elif not ipdata['public']: - if ipdata['address_family'] == 6: - # Packet doesn't give public ipv6 yet, but maybe one - # day they will - device_data['private_ipv6'] = ipdata['address'] - elif ipdata['address_family'] == 4: - device_data['private_ipv4'] = ipdata['address'] - return device_data - - -def is_valid_hostname(hostname): - return re.match(HOSTNAME_RE, hostname) is not None - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def listify_string_name_or_id(s): - if ',' in s: - return s.split(',') - else: - return [s] - - -def get_hostname_list(module): - # hostname is a list-typed param, so I guess it should return list - # (and it does, in Ansible 2.2.1) but in order to be defensive, - # I keep here the code to convert an eventual string to list - hostnames = module.params.get('hostnames') - count = module.params.get('count') - count_offset = module.params.get('count_offset') - if isinstance(hostnames, str): - hostnames = listify_string_name_or_id(hostnames) - if not isinstance(hostnames, list): - raise Exception("name %s is not convertible to list" % hostnames) - - # at this point, hostnames is a list - hostnames = [h.strip() for h in hostnames] - - if (len(hostnames) > 1) and (count > 1): - _msg = ("If you set count>1, you should only specify one hostname " - "with the %d formatter, not a list of hostnames.") - raise Exception(_msg) - - if (len(hostnames) == 1) and (count > 0): - hostname_spec = hostnames[0] - count_range = range(count_offset, count_offset + count) - if re.search(r"%\d{0,2}d", hostname_spec): - hostnames = [hostname_spec % i for i in count_range] - elif count > 1: - hostname_spec = '%s%%02d' % hostname_spec - hostnames = [hostname_spec % i for i in count_range] - - for hn in hostnames: - if not is_valid_hostname(hn): - raise Exception("Hostname '%s' does not seem to be valid" % hn) - - if len(hostnames) > MAX_DEVICES: - raise Exception("You specified too many hostnames, max is %d" % - MAX_DEVICES) - return hostnames - - -def get_device_id_list(module): - device_ids = module.params.get('device_ids') - - if isinstance(device_ids, str): - device_ids = listify_string_name_or_id(device_ids) - - device_ids = [di.strip() for di in device_ids] - - for di in device_ids: - if not is_valid_uuid(di): - raise Exception("Device ID '%s' does not seem to be valid" % di) - - 
if len(device_ids) > MAX_DEVICES: - raise Exception("You specified too many devices, max is %d" % - MAX_DEVICES) - return device_ids - - -def create_single_device(module, packet_conn, hostname): - - for param in ('hostnames', 'operating_system', 'plan'): - if not module.params.get(param): - raise Exception("%s parameter is required for new device." - % param) - project_id = module.params.get('project_id') - plan = module.params.get('plan') - tags = module.params.get('tags') - user_data = module.params.get('user_data') - facility = module.params.get('facility') - operating_system = module.params.get('operating_system') - locked = module.params.get('locked') - ipxe_script_url = module.params.get('ipxe_script_url') - always_pxe = module.params.get('always_pxe') - if operating_system != 'custom_ipxe': - for param in ('ipxe_script_url', 'always_pxe'): - if module.params.get(param): - raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param) - - device = packet_conn.create_device( - project_id=project_id, - hostname=hostname, - tags=tags, - plan=plan, - facility=facility, - operating_system=operating_system, - userdata=user_data, - locked=locked, - ipxe_script_url=ipxe_script_url, - always_pxe=always_pxe) - return device - - -def refresh_device_list(module, packet_conn, devices): - device_ids = [d.id for d in devices] - new_device_list = get_existing_devices(module, packet_conn) - return [d for d in new_device_list if d.id in device_ids] - - -def wait_for_devices_active(module, packet_conn, watched_devices): - wait_timeout = module.params.get('wait_timeout') - wait_timeout = time.time() + wait_timeout - refreshed = watched_devices - while wait_timeout > time.time(): - refreshed = refresh_device_list(module, packet_conn, watched_devices) - if all(d.state == 'active' for d in refreshed): - return refreshed - time.sleep(5) - raise Exception("Waiting for state \"active\" timed out for devices: %s" - % [d.hostname for d in refreshed if d.state != "active"]) - - -def wait_for_public_IPv(module, packet_conn, created_devices): - - def has_public_ip(addr_list, ip_v): - return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list) - - def all_have_public_ip(ds, ip_v): - return all(has_public_ip(d.ip_addresses, ip_v) for d in ds) - - address_family = module.params.get('wait_for_public_IPv') - - wait_timeout = module.params.get('wait_timeout') - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - refreshed = refresh_device_list(module, packet_conn, created_devices) - if all_have_public_ip(refreshed, address_family): - return refreshed - time.sleep(5) - - raise Exception("Waiting for IPv%d address timed out. 
Hostnames: %s" - % (address_family, [d.hostname for d in created_devices])) - - -def get_existing_devices(module, packet_conn): - project_id = module.params.get('project_id') - return packet_conn.list_devices( - project_id, params={ - 'per_page': MAX_DEVICES}) - - -def get_specified_device_identifiers(module): - if module.params.get('device_ids'): - device_id_list = get_device_id_list(module) - return {'ids': device_id_list, 'hostnames': []} - elif module.params.get('hostnames'): - hostname_list = get_hostname_list(module) - return {'hostnames': hostname_list, 'ids': []} - - -def act_on_devices(module, packet_conn, target_state): - specified_identifiers = get_specified_device_identifiers(module) - existing_devices = get_existing_devices(module, packet_conn) - changed = False - create_hostnames = [] - if target_state in ['present', 'active', 'rebooted']: - # states where we might create non-existing specified devices - existing_devices_names = [ed.hostname for ed in existing_devices] - create_hostnames = [hn for hn in specified_identifiers['hostnames'] - if hn not in existing_devices_names] - - process_devices = [d for d in existing_devices - if (d.id in specified_identifiers['ids']) or - (d.hostname in specified_identifiers['hostnames'])] - - if target_state != 'present': - _absent_state_map = {} - for s in PACKET_DEVICE_STATES: - _absent_state_map[s] = packet.Device.delete - - state_map = { - 'absent': _absent_state_map, - 'active': {'inactive': packet.Device.power_on, - 'provisioning': None, 'rebooting': None - }, - 'inactive': {'active': packet.Device.power_off}, - 'rebooted': {'active': packet.Device.reboot, - 'inactive': packet.Device.power_on, - 'provisioning': None, 'rebooting': None - }, - } - - # First do non-creation actions, it might be faster - for d in process_devices: - if d.state == target_state: - continue - if d.state in state_map[target_state]: - api_operation = state_map[target_state].get(d.state) - if api_operation is not None: - api_operation(d) - changed = True - else: - _msg = ( - "I don't know how to process existing device %s from state %s " - "to state %s" % - (d.hostname, d.state, target_state)) - raise Exception(_msg) - - # At last create missing devices - created_devices = [] - if create_hostnames: - created_devices = [create_single_device(module, packet_conn, n) - for n in create_hostnames] - if module.params.get('wait_for_public_IPv'): - created_devices = wait_for_public_IPv( - module, packet_conn, created_devices) - changed = True - - processed_devices = created_devices + process_devices - if target_state == 'active': - processed_devices = wait_for_devices_active( - module, packet_conn, processed_devices) - - return { - 'changed': changed, - 'devices': [serialize_device(d) for d in processed_devices] - } - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), - no_log=True), - count=dict(type='int', default=1), - count_offset=dict(type='int', default=1), - device_ids=dict(type='list', elements='str'), - facility=dict(), - features=dict(type='dict'), - hostnames=dict(type='list', elements='str', aliases=['name']), - tags=dict(type='list', elements='str'), - locked=dict(type='bool', default=False, aliases=['lock']), - operating_system=dict(), - plan=dict(), - project_id=dict(required=True), - state=dict(choices=ALLOWED_STATES, default='present'), - user_data=dict(default=None), - wait_for_public_IPv=dict(type='int', choices=[4, 6]), - wait_timeout=dict(type='int', default=900), - 
ipxe_script_url=dict(default=''), - always_pxe=dict(type='bool', default=False), - ), - required_one_of=[('device_ids', 'hostnames',)], - mutually_exclusive=[ - ('hostnames', 'device_ids'), - ('count', 'device_ids'), - ('count_offset', 'device_ids'), - ] - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable %s, " - "the auth_token parameter is required" % - PACKET_API_TOKEN_ENV_VAR) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - try: - module.exit_json(**act_on_devices(module, packet_conn, state)) - except Exception as e: - module.fail_json(msg='failed to set device state %s, error: %s' % - (state, to_native(e)), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py deleted file mode 100644 index 718de36f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py +++ /dev/null @@ -1,326 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: packet_ip_subnet - -short_description: Assign IP subnet to a bare metal server. - -description: - - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. - - IPv4 subnets must come from already reserved block. - - IPv6 subnets must come from publicly routable /56 block from your project. - - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. - -version_added: '0.2.0' - -author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) - -options: - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - hostname: - description: - - A hostname of a device to/from which to assign/remove a subnet. - required: False - type: str - - device_id: - description: - - UUID of a device to/from which to assign/remove a subnet. - required: False - type: str - - project_id: - description: - - UUID of a project of the device to/from which to assign/remove a subnet. - type: str - - device_count: - description: - - The number of devices to retrieve from the project. The max allowed value is 1000. - - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info. - default: 100 - type: int - - cidr: - description: - - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host. - aliases: [name] - type: str - required: true - - state: - description: - - Desired state of the IP subnet on the specified device. - - With state == C(present), you must specify either hostname or device_id. Subnet with given CIDR will then be assigned to the specified device. - - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from specified devices. 
- - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to. - choices: ['present', 'absent'] - default: 'present' - type: str - -requirements: - - "packet-python >= 1.35" - - "python >= 2.6" -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass it to the auth_token parameter of the module instead. - -- name: Create 1 device and assign an arbitrary public IPv4 subnet to it - hosts: localhost - tasks: - - - packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active - -# Pick an IPv4 address from a block allocated to your project. - - - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostname: myserver - cidr: "147.75.201.78/32" - -# Release IP address 147.75.201.78 - -- name: Unassign IP address from any device in your project - hosts: localhost - tasks: - - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - cidr: "147.75.201.78/32" - state: absent -''' - -RETURN = ''' -changed: - description: True if an IP address assignments were altered in any way (created or removed). - type: bool - sample: True - returned: success - -device_id: - type: str - description: UUID of the device associated with the specified IP address. - returned: success - -subnet: - description: Dict with data about the handled IP subnet. - type: dict - sample: - address: 147.75.90.241 - address_family: 4 - assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 } - cidr: 31 - created_at: '2017-08-07T15:15:30Z' - enabled: True - gateway: 147.75.90.240 - href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f - id: 1eda960-0a16-4c0f-b196-f3dc4928529f - manageable: True - management: True - netmask: 255.255.255.254 - network: 147.75.90.240 - public: True - returned: success -''' - - -import uuid -import re - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True - -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - - -NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') -HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) -PROJECT_MAX_DEVICES = 100 - - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - - -ALLOWED_STATES = ['absent', 'present'] - - -def is_valid_hostname(hostname): - return re.match(HOSTNAME_RE, hostname) is not None - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def get_existing_devices(module, packet_conn): - project_id = module.params.get('project_id') - if not is_valid_uuid(project_id): - raise Exception("Project ID {0} does not seem to be valid".format(project_id)) - - per_page = module.params.get('device_count') - return packet_conn.list_devices( - project_id, params={'per_page': per_page}) - - -def get_specified_device_identifiers(module): - if module.params.get('device_id'): - _d_id = module.params.get('device_id') - if not is_valid_uuid(_d_id): - raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id)) - return {'device_id': _d_id, 'hostname': None} - elif module.params.get('hostname'): - _hn = module.params.get('hostname') - if not is_valid_hostname(_hn): - raise Exception("Hostname '{0}' does not seem to be valid".format(_hn)) 
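        # (Editor's note: hostname and device_id are declared mutually
        # exclusive in main() below; when neither is given, act_on_assignment()
        # treats state=absent as "release this subnet from whichever device
        # currently holds it".)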
- return {'hostname': _hn, 'device_id': None} - else: - return {'hostname': None, 'device_id': None} - - -def parse_subnet_cidr(cidr): - if "/" not in cidr: - raise Exception("CIDR expression in wrong format, must be address/prefix_len") - addr, prefixlen = cidr.split("/") - try: - prefixlen = int(prefixlen) - except ValueError: - raise("Wrong prefix length in CIDR expression {0}".format(cidr)) - return addr, prefixlen - - -def act_on_assignment(target_state, module, packet_conn): - return_dict = {'changed': False} - specified_cidr = module.params.get("cidr") - address, prefixlen = parse_subnet_cidr(specified_cidr) - - specified_identifier = get_specified_device_identifiers(module) - - if module.check_mode: - return return_dict - - if (specified_identifier['hostname'] is None) and ( - specified_identifier['device_id'] is None): - if target_state == 'absent': - # The special case to release the IP from any assignment - for d in get_existing_devices(module, packet_conn): - for ia in d.ip_addresses: - if address == ia['address'] and prefixlen == ia['cidr']: - packet_conn.call_api(ia['href'], "DELETE") - return_dict['changed'] = True - return_dict['subnet'] = ia - return_dict['device_id'] = d.id - return return_dict - raise Exception("If you assign an address, you must specify either " - "target device ID or target unique hostname.") - - if specified_identifier['device_id'] is not None: - device = packet_conn.get_device(specified_identifier['device_id']) - else: - all_devices = get_existing_devices(module, packet_conn) - hn = specified_identifier['hostname'] - matching_devices = [d for d in all_devices if d.hostname == hn] - if len(matching_devices) > 1: - raise Exception("There are more than one devices matching given hostname {0}".format(hn)) - if len(matching_devices) == 0: - raise Exception("There is no device matching given hostname {0}".format(hn)) - device = matching_devices[0] - - return_dict['device_id'] = device.id - assignment_dicts = [i for i in device.ip_addresses - if i['address'] == address and i['cidr'] == prefixlen] - if len(assignment_dicts) > 1: - raise Exception("IP address {0} is assigned more than once for device {1}".format( - specified_cidr, device.hostname)) - - if target_state == "absent": - if len(assignment_dicts) == 1: - packet_conn.call_api(assignment_dicts[0]['href'], "DELETE") - return_dict['subnet'] = assignment_dicts[0] - return_dict['changed'] = True - elif target_state == "present": - if len(assignment_dicts) == 0: - new_assignment = packet_conn.call_api( - "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)}) - return_dict['changed'] = True - return_dict['subnet'] = new_assignment - return return_dict - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - device_id=dict(type='str'), - hostname=dict(type='str'), - project_id=dict(type='str'), - device_count=dict(type='int', default=PROJECT_MAX_DEVICES), - cidr=dict(type='str', required=True, aliases=['name']), - state=dict(choices=ALLOWED_STATES, default='present'), - ), - supports_check_mode=True, - mutually_exclusive=[('hostname', 'device_id')], - required_one_of=[['hostname', 'device_id', 'project_id']], - required_by=dict( - hostname=('project_id',), - ), - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable 
{0}, " - "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - try: - module.exit_json(**act_on_assignment(state, module, packet_conn)) - except Exception as e: - module.fail_json( - msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e))) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py deleted file mode 100644 index c6502c6e..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2019, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: packet_project - -short_description: Create/delete a project in Packet host. - -description: - - Create/delete a project in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#projects). - -version_added: '0.2.0' - -author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) - -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent'] - type: str - - payment_method: - description: - - Payment method is name of one of the payment methods available to your user. - - When blank, the API assumes the default payment method. - type: str - - auth_token: - description: - - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - name: - description: - - Name for/of the project. - type: str - - org_id: - description: - - UUID of the organization to create a project for. - - When blank, the API assumes the default organization. - type: str - - id: - description: - - UUID of the project which you want to remove. - type: str - - custom_data: - description: - - Custom data about the project to create. - type: str - -requirements: - - "python >= 2.6" - - "packet-python >= 1.40" - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass the api token in module param auth_token. - -- name: Create new project - hosts: localhost - tasks: - community.general.packet_project: - name: "new project" - -- name: Create new project within non-default organization - hosts: localhost - tasks: - community.general.packet_project: - name: "my org project" - org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0 - -- name: Remove project by id - hosts: localhost - tasks: - community.general.packet_project: - state: absent - id: eef49903-7a09-4ca1-af67-4087c29ab5b6 - -- name: Create new project with non-default billing method - hosts: localhost - tasks: - community.general.packet_project: - name: "newer project" - payment_method: "the other visa" -''' - -RETURN = ''' -changed: - description: True if a project was created or removed. - type: bool - sample: True - returned: success - -name: - description: Name of addressed project. - type: str - returned: success - -id: - description: UUID of addressed project. 
- type: str - returned: success -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True - -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - - -def act_on_project(target_state, module, packet_conn): - result_dict = {'changed': False} - given_id = module.params.get('id') - given_name = module.params.get('name') - if given_id: - matching_projects = [ - p for p in packet_conn.list_projects() if given_id == p.id] - else: - matching_projects = [ - p for p in packet_conn.list_projects() if given_name == p.name] - - if target_state == 'present': - if len(matching_projects) == 0: - org_id = module.params.get('org_id') - custom_data = module.params.get('custom_data') - payment_method = module.params.get('payment_method') - - if not org_id: - params = { - "name": given_name, - "payment_method_id": payment_method, - "customdata": custom_data - } - new_project_data = packet_conn.call_api("projects", "POST", params) - new_project = packet.Project(new_project_data, packet_conn) - else: - new_project = packet_conn.create_organization_project( - org_id=org_id, - name=given_name, - payment_method_id=payment_method, - customdata=custom_data - ) - - result_dict['changed'] = True - matching_projects.append(new_project) - - result_dict['name'] = matching_projects[0].name - result_dict['id'] = matching_projects[0].id - else: - if len(matching_projects) > 1: - _msg = ("More than projects matched for module call with state = absent: " - "{0}".format(to_native(matching_projects))) - module.fail_json(msg=_msg) - - if len(matching_projects) == 1: - p = matching_projects[0] - result_dict['name'] = p.name - result_dict['id'] = p.id - result_dict['changed'] = True - try: - p.delete() - except Exception as e: - _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format( - p.name, p.id, to_native(e))) - module.fail_json(msg=_msg) - return result_dict - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(choices=['present', 'absent'], default='present'), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - name=dict(type='str'), - id=dict(type='str'), - org_id=dict(type='str'), - payment_method=dict(type='str'), - custom_data=dict(type='str'), - ), - supports_check_mode=True, - required_one_of=[("name", "id",)], - mutually_exclusive=[ - ('name', 'id'), - ] - ) - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable {0}, " - "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - if state in ['present', 'absent']: - if module.check_mode: - module.exit_json(changed=False) - - try: - module.exit_json(**act_on_project(state, module, packet_conn)) - except Exception as e: - module.fail_json( - msg="failed to set project state {0}: {1}".format(state, to_native(e))) - else: - module.fail_json(msg="{0} is not a valid state for this module".format(state)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py 
b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py deleted file mode 100644 index 4800718f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright 2016 Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: packet_sshkey -short_description: Create/delete an SSH key in Packet host. -description: - - Create/delete an SSH key in Packet host. - - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). -author: "Tomas Karasek (@t0mk) " -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent'] - type: str - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - label: - description: - - Label for the key. If you keep it empty, it will be read from key string. - type: str - aliases: [name] - id: - description: - - UUID of the key which you want to remove. - type: str - fingerprint: - description: - - Fingerprint of the key which you want to remove. - type: str - key: - description: - - Public Key string ({type} {base64 encoded key} {description}). - type: str - key_file: - description: - - File with the public key. - type: path - -requirements: - - "python >= 2.6" - - packet-python - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass the api token in module param auth_token. - -- name: Create sshkey from string - hosts: localhost - tasks: - community.general.packet_sshkey: - key: "{{ lookup('file', 'my_packet_sshkey.pub') }}" - -- name: Create sshkey from file - hosts: localhost - tasks: - community.general.packet_sshkey: - label: key from file - key_file: ~/ff.pub - -- name: Remove sshkey by id - hosts: localhost - tasks: - community.general.packet_sshkey: - state: absent - id: eef49903-7a09-4ca1-af67-4087c29ab5b6 -''' - -RETURN = ''' -changed: - description: True if a sshkey was created or removed. - type: bool - sample: True - returned: always -sshkeys: - description: Information about sshkeys that were created/removed. 
- type: list - sample: [ - { - "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", - "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", - "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2", - "label": "mynewkey33" - } - ] - returned: always -''' # NOQA - -import os -import uuid - -from ansible.module_utils.basic import AnsibleModule - -HAS_PACKET_SDK = True -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - - -def serialize_sshkey(sshkey): - sshkey_data = {} - copy_keys = ['id', 'key', 'label', 'fingerprint'] - for name in copy_keys: - sshkey_data[name] = getattr(sshkey, name) - return sshkey_data - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def load_key_string(key_str): - ret_dict = {} - key_str = key_str.strip() - ret_dict['key'] = key_str - cut_key = key_str.split() - if len(cut_key) in [2, 3]: - if len(cut_key) == 3: - ret_dict['label'] = cut_key[2] - else: - raise Exception("Public key %s is in wrong format" % key_str) - return ret_dict - - -def get_sshkey_selector(module): - key_id = module.params.get('id') - if key_id: - if not is_valid_uuid(key_id): - raise Exception("sshkey ID %s is not valid UUID" % key_id) - selecting_fields = ['label', 'fingerprint', 'id', 'key'] - select_dict = {} - for f in selecting_fields: - if module.params.get(f) is not None: - select_dict[f] = module.params.get(f) - - if module.params.get('key_file'): - with open(module.params.get('key_file')) as _file: - loaded_key = load_key_string(_file.read()) - select_dict['key'] = loaded_key['key'] - if module.params.get('label') is None: - if loaded_key.get('label'): - select_dict['label'] = loaded_key['label'] - - def selector(k): - if 'key' in select_dict: - # if key string is specified, compare only the key strings - return k.key == select_dict['key'] - else: - # if key string not specified, all the fields must match - return all(select_dict[f] == getattr(k, f) for f in select_dict) - return selector - - -def act_on_sshkeys(target_state, module, packet_conn): - selector = get_sshkey_selector(module) - existing_sshkeys = packet_conn.list_ssh_keys() - matching_sshkeys = filter(selector, existing_sshkeys) - changed = False - if target_state == 'present': - if matching_sshkeys == []: - # there is no key matching the fields from module call - # => create the key, label and - newkey = {} - if module.params.get('key_file'): - with open(module.params.get('key_file')) as f: - newkey = load_key_string(f.read()) - if module.params.get('key'): - newkey = load_key_string(module.params.get('key')) - if module.params.get('label'): - newkey['label'] = module.params.get('label') - for param in ('label', 'key'): - if param not in newkey: - _msg = ("If you want to ensure a key is present, you must " - "supply both a label and a key string, either in " - "module params, or in a key file. 
%s is missing" - % param) - raise Exception(_msg) - matching_sshkeys = [] - new_key_response = packet_conn.create_ssh_key( - newkey['label'], newkey['key']) - changed = True - - matching_sshkeys.append(new_key_response) - else: - # state is 'absent' => delete matching keys - for k in matching_sshkeys: - try: - k.delete() - changed = True - except Exception as e: - _msg = ("while trying to remove sshkey %s, id %s %s, " - "got error: %s" % - (k.label, k.id, target_state, e)) - raise Exception(_msg) - - return { - 'changed': changed, - 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys] - } - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(choices=['present', 'absent'], default='present'), - auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), - no_log=True), - label=dict(type='str', aliases=['name'], default=None), - id=dict(type='str', default=None), - fingerprint=dict(type='str', default=None), - key=dict(type='str', default=None, no_log=True), - key_file=dict(type='path', default=None), - ), - mutually_exclusive=[ - ('label', 'id'), - ('label', 'fingerprint'), - ('id', 'fingerprint'), - ('key', 'fingerprint'), - ('key', 'id'), - ('key_file', 'key'), - ] - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable %s, " - "the auth_token parameter is required" % - PACKET_API_TOKEN_ENV_VAR) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - if state in ['present', 'absent']: - try: - module.exit_json(**act_on_sshkeys(state, module, packet_conn)) - except Exception as e: - module.fail_json(msg='failed to set sshkey state: %s' % str(e)) - else: - module.fail_json(msg='%s is not a valid state for this module' % state) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py deleted file mode 100644 index 97c1e749..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py +++ /dev/null @@ -1,321 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: packet_volume - -short_description: Create/delete a volume in Packet host. - -description: - - Create/delete a volume in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#volumes). - -version_added: '0.2.0' - -author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) - -options: - state: - description: - - Desired state of the volume. - default: present - choices: ['present', 'absent'] - type: str - - project_id: - description: - - ID of project of the device. - required: true - type: str - - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - name: - description: - - Selector for API-generated name of the volume - type: str - - description: - description: - - User-defined description attribute for Packet volume. 
- - "It is used used as idempotent identifier - if volume with given - description exists, new one is not created." - type: str - - id: - description: - - UUID of a volume. - type: str - - plan: - description: - - storage_1 for standard tier, storage_2 for premium (performance) tier. - - Tiers are described at U(https://www.packet.com/cloud/storage/). - choices: ['storage_1', 'storage_2'] - default: 'storage_1' - type: str - - facility: - description: - - Location of the volume. - - Volumes can only be attached to device in the same location. - type: str - - size: - description: - - Size of the volume in gigabytes. - type: int - - locked: - description: - - Create new volume locked. - type: bool - default: False - - billing_cycle: - description: - - Billing cycle for new volume. - choices: ['hourly', 'monthly'] - default: 'hourly' - type: str - - snapshot_policy: - description: - - Snapshot policy for new volume. - type: dict - - suboptions: - snapshot_count: - description: - - How many snapshots to keep, a positive integer. - required: True - type: int - - snapshot_frequency: - description: - - Frequency of snapshots. - required: True - choices: ["15min", "1hour", "1day", "1week", "1month", "1year"] - type: str - -requirements: - - "python >= 2.6" - - "packet-python >= 1.35" - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass the api token in module param auth_token. - -- hosts: localhost - vars: - volname: testvol123 - project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b - - tasks: - - name: Create volume - community.general.packet_volume: - description: "{{ volname }}" - project_id: "{{ project_id }}" - facility: 'ewr1' - plan: 'storage_1' - state: present - size: 10 - snapshot_policy: - snapshot_count: 10 - snapshot_frequency: 1day - register: result_create - - - name: Delete volume - community.general.packet_volume: - id: "{{ result_create.id }}" - project_id: "{{ project_id }}" - state: absent -''' - -RETURN = ''' -id: - description: UUID of specified volume - type: str - returned: success - sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c -name: - description: The API-generated name of the volume resource. - type: str - returned: if volume is attached/detached to/from some device - sample: "volume-a91dc506" -description: - description: The user-defined description of the volume resource. 
- type: str - returned: success - sample: "Just another volume" -''' - -import uuid - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True - - -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - -VOLUME_PLANS = ["storage_1", "storage_2"] -VOLUME_STATES = ["present", "absent"] -BILLING = ["hourly", "monthly"] - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def get_volume_selector(module): - if module.params.get('id'): - i = module.params.get('id') - if not is_valid_uuid(i): - raise Exception("Volume ID '{0}' is not a valid UUID".format(i)) - return lambda v: v['id'] == i - elif module.params.get('name'): - n = module.params.get('name') - return lambda v: v['name'] == n - elif module.params.get('description'): - d = module.params.get('description') - return lambda v: v['description'] == d - - -def get_or_fail(params, key): - item = params.get(key) - if item is None: - raise Exception("{0} must be specified for new volume".format(key)) - return item - - -def act_on_volume(target_state, module, packet_conn): - return_dict = {'changed': False} - s = get_volume_selector(module) - project_id = module.params.get("project_id") - api_method = "projects/{0}/storage".format(project_id) - all_volumes = packet_conn.call_api(api_method, "GET")['volumes'] - matching_volumes = [v for v in all_volumes if s(v)] - - if target_state == "present": - if len(matching_volumes) == 0: - params = { - "description": get_or_fail(module.params, "description"), - "size": get_or_fail(module.params, "size"), - "plan": get_or_fail(module.params, "plan"), - "facility": get_or_fail(module.params, "facility"), - "locked": get_or_fail(module.params, "locked"), - "billing_cycle": get_or_fail(module.params, "billing_cycle"), - "snapshot_policies": module.params.get("snapshot_policy"), - } - - new_volume_data = packet_conn.call_api(api_method, "POST", params) - return_dict['changed'] = True - for k in ['id', 'name', 'description']: - return_dict[k] = new_volume_data[k] - - else: - for k in ['id', 'name', 'description']: - return_dict[k] = matching_volumes[0][k] - - else: - if len(matching_volumes) > 1: - _msg = ("More than one volume matches in module call for absent state: {0}".format( - to_native(matching_volumes))) - module.fail_json(msg=_msg) - - if len(matching_volumes) == 1: - volume = matching_volumes[0] - packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE") - return_dict['changed'] = True - for k in ['id', 'name', 'description']: - return_dict[k] = volume[k] - - return return_dict - - -def main(): - module = AnsibleModule( - argument_spec=dict( - id=dict(type='str', default=None), - description=dict(type="str", default=None), - name=dict(type='str', default=None), - state=dict(choices=VOLUME_STATES, default="present"), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - project_id=dict(required=True), - plan=dict(choices=VOLUME_PLANS, default="storage_1"), - facility=dict(type="str"), - size=dict(type="int"), - locked=dict(type="bool", default=False), - snapshot_policy=dict(type='dict', default=None), - billing_cycle=dict(type='str', choices=BILLING, default="hourly"), - ), - supports_check_mode=True, - required_one_of=[("name", "id", "description")], - mutually_exclusive=[ - ('name', 
'id'), - ('id', 'description'), - ('name', 'description'), - ] - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable {0}, " - "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - if state in VOLUME_STATES: - if module.check_mode: - module.exit_json(changed=False) - - try: - module.exit_json(**act_on_volume(state, module, packet_conn)) - except Exception as e: - module.fail_json( - msg="failed to set volume state {0}: {1}".format( - state, to_native(e))) - else: - module.fail_json(msg="{0} is not a valid state for this module".format(state)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py deleted file mode 100644 index 9044fbcf..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py +++ /dev/null @@ -1,298 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: packet_volume_attachment - -short_description: Attach/detach a volume to a device in the Packet host. - -description: - - Attach/detach a volume to a device in the Packet host. - - API is documented at U(https://www.packet.com/developers/api/volumes/). - - "This module creates the attachment route in the Packet API. In order to discover - the block devices on the server, you have to run the Attach Scripts, - as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)." - -version_added: '0.2.0' - -author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) - -options: - state: - description: - - Indicate desired state of the attachment. - default: present - choices: ['present', 'absent'] - type: str - - auth_token: - description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). - type: str - - project_id: - description: - - UUID of the project to which the device and volume belong. - type: str - required: true - - volume: - description: - - Selector for the volume. - - It can be a UUID, an API-generated volume name, or user-defined description string. - - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"' - type: str - required: true - - device: - description: - - Selector for the device. - - It can be a UUID of the device, or a hostname. - - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"' - type: str - -requirements: - - "python >= 2.6" - - "packet-python >= 1.35" - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. -# You can also pass the api token in module param auth_token. 
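# (Editor's note, from the selector docs above: a volume value that parses as
# a UUID is matched on id, anything else on the API-generated name or the
# description; a device value is matched on id or hostname. A selector that
# matches more than one resource makes the module fail rather than guess.)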
- -- hosts: localhost - - vars: - volname: testvol - devname: testdev - project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b - - tasks: - - name: Create volume - packet_volume: - description: "{{ volname }}" - project_id: "{{ project_id }}" - facility: ewr1 - plan: storage_1 - state: present - size: 10 - snapshot_policy: - snapshot_count: 10 - snapshot_frequency: 1day - - - name: Create a device - packet_device: - project_id: "{{ project_id }}" - hostnames: "{{ devname }}" - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: ewr1 - state: present - - - name: Attach testvol to testdev - community.general.packet_volume_attachment: - project_id: "{{ project_id }}" - volume: "{{ volname }}" - device: "{{ devname }}" - - - name: Detach testvol from testdev - community.general.packet_volume_attachment: - project_id: "{{ project_id }}" - volume: "{{ volname }}" - device: "{{ devname }}" - state: absent -''' - -RETURN = ''' -volume_id: - description: UUID of volume addressed by the module call. - type: str - returned: success - -device_id: - description: UUID of device addressed by the module call. - type: str - returned: success -''' - -import uuid - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -HAS_PACKET_SDK = True - - -try: - import packet -except ImportError: - HAS_PACKET_SDK = False - - -PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" - -STATES = ["present", "absent"] - - -def is_valid_uuid(myuuid): - try: - val = uuid.UUID(myuuid, version=4) - except ValueError: - return False - return str(val) == myuuid - - -def get_volume_selector(spec): - if is_valid_uuid(spec): - return lambda v: v['id'] == spec - else: - return lambda v: v['name'] == spec or v['description'] == spec - - -def get_device_selector(spec): - if is_valid_uuid(spec): - return lambda v: v['id'] == spec - else: - return lambda v: v['hostname'] == spec - - -def do_attach(packet_conn, vol_id, dev_id): - api_method = "storage/{0}/attachments".format(vol_id) - packet_conn.call_api( - api_method, - params={"device_id": dev_id}, - type="POST") - - -def do_detach(packet_conn, vol, dev_id=None): - def dev_match(a): - return (dev_id is None) or (a['device']['id'] == dev_id) - for a in vol['attachments']: - if dev_match(a): - packet_conn.call_api(a['href'], type="DELETE") - - -def validate_selected(l, resource_type, spec): - if len(l) > 1: - _msg = ("more than one {0} matches specification {1}: {2}".format( - resource_type, spec, l)) - raise Exception(_msg) - if len(l) == 0: - _msg = "no {0} matches specification: {1}".format(resource_type, spec) - raise Exception(_msg) - - -def get_attached_dev_ids(volume_dict): - if len(volume_dict['attachments']) == 0: - return [] - else: - return [a['device']['id'] for a in volume_dict['attachments']] - - -def act_on_volume_attachment(target_state, module, packet_conn): - return_dict = {'changed': False} - volspec = module.params.get("volume") - devspec = module.params.get("device") - if devspec is None and target_state == 'present': - raise Exception("If you want to attach a volume, you must specify a device.") - project_id = module.params.get("project_id") - volumes_api_method = "projects/{0}/storage".format(project_id) - volumes = packet_conn.call_api(volumes_api_method, - params={'include': 'facility,attachments.device'})['volumes'] - v_match = get_volume_selector(volspec) - matching_volumes = [v for v in volumes if v_match(v)] - validate_selected(matching_volumes, "volume", volspec) - volume = 
matching_volumes[0] - return_dict['volume_id'] = volume['id'] - - device = None - if devspec is not None: - devices_api_method = "projects/{0}/devices".format(project_id) - devices = packet_conn.call_api(devices_api_method)['devices'] - d_match = get_device_selector(devspec) - matching_devices = [d for d in devices if d_match(d)] - validate_selected(matching_devices, "device", devspec) - device = matching_devices[0] - return_dict['device_id'] = device['id'] - - attached_device_ids = get_attached_dev_ids(volume) - - if target_state == "present": - if len(attached_device_ids) == 0: - do_attach(packet_conn, volume['id'], device['id']) - return_dict['changed'] = True - elif device['id'] not in attached_device_ids: - # Don't reattach volume which is attached to a different device. - # Rather fail than force remove a device on state == 'present'. - raise Exception("volume {0} is already attached to device {1}".format( - volume, attached_device_ids)) - else: - if device is None: - if len(attached_device_ids) > 0: - do_detach(packet_conn, volume) - return_dict['changed'] = True - elif device['id'] in attached_device_ids: - do_detach(packet_conn, volume, device['id']) - return_dict['changed'] = True - - return return_dict - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(choices=STATES, default="present"), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - volume=dict(type="str", required=True), - project_id=dict(type="str", required=True), - device=dict(type="str"), - ), - supports_check_mode=True, - ) - - if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') - - if not module.params.get('auth_token'): - _fail_msg = ("if Packet API token is not in environment variable {0}, " - "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) - module.fail_json(msg=_fail_msg) - - auth_token = module.params.get('auth_token') - - packet_conn = packet.Manager(auth_token=auth_token) - - state = module.params.get('state') - - if state in STATES: - if module.check_mode: - module.exit_json(changed=False) - - try: - module.exit_json( - **act_on_volume_attachment(state, module, packet_conn)) - except Exception as e: - module.fail_json( - msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e))) - else: - module.fail_json(msg="{0} is not a valid state for this module".format(state)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py deleted file mode 100644 index 3a75778a..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py +++ /dev/null @@ -1,657 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks -short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine. -description: - - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait - for it to be 'running' before returning. 
This module has a dependency on profitbricks >= 1.0.0 -options: - auto_increment: - description: - - Whether or not to increment a single number in the name for created virtual machines. - type: bool - default: 'yes' - name: - description: - - The name of the virtual machine. - type: str - image: - description: - - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. - type: str - image_password: - description: - - Password set for the administrative user. - type: str - ssh_keys: - description: - - Public SSH keys allowing access to the virtual machine. - type: list - elements: str - datacenter: - description: - - The datacenter to provision this virtual machine. - type: str - cores: - description: - - The number of CPU cores to allocate to the virtual machine. - default: 2 - type: int - ram: - description: - - The amount of memory to allocate to the virtual machine. - default: 2048 - type: int - cpu_family: - description: - - The CPU family type to allocate to the virtual machine. - type: str - default: AMD_OPTERON - choices: [ "AMD_OPTERON", "INTEL_XEON" ] - volume_size: - description: - - The size in GB of the boot volume. - type: int - default: 10 - bus: - description: - - The bus type for the volume. - type: str - default: VIRTIO - choices: [ "IDE", "VIRTIO"] - instance_ids: - description: - - list of instance ids, currently only used when state='absent' to remove instances. - type: list - elements: str - count: - description: - - The number of virtual machines to create. - type: int - default: 1 - location: - description: - - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored. - type: str - default: us/las - choices: [ "us/las", "de/fra", "de/fkb" ] - assign_public_ip: - description: - - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created. - type: bool - default: 'no' - lan: - description: - - The ID of the LAN you wish to add the servers to. - type: int - default: 1 - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - wait: - description: - - wait for the instance to be in state 'running' before returning - type: bool - default: 'yes' - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - remove_boot_volume: - description: - - remove the bootVolume of the virtual machine you're destroying. - type: bool - default: 'yes' - state: - description: - - create or terminate instances - - 'The choices available are: C(running), C(stopped), C(absent), C(present).' - type: str - default: 'present' - disk_type: - description: - - the type of disk to be allocated. - type: str - choices: [SSD, HDD] - default: HDD - -requirements: - - "profitbricks" - - "python >= 2.6" -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' - -# Note: These examples do not set authentication details, see the AWS Guide for details. 
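# How auto_increment expands the name pattern (a sketch of the behaviour
# described in the auto_increment/count options; the real logic lives in
# create_virtual_machine below): with name=web%02d.stackpointcloud.com and
# count=3 the module renders name % n for n in 1..3, yielding
# web01.stackpointcloud.com through web03.stackpointcloud.com. A name
# without a format specifier first gets '%d' appended.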
- -# Provisioning example -- name: Create three servers and enumerate their names - community.general.profitbricks: - datacenter: Tardis One - name: web%02d.stackpointcloud.com - cores: 4 - ram: 2048 - volume_size: 50 - cpu_family: INTEL_XEON - image: a3eae284-a2fe-11e4-b187-5f1f641608c8 - location: us/las - count: 3 - assign_public_ip: true - -- name: Remove virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: absent - -- name: Start virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: running - -- name: Stop virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: stopped -''' - -import re -import uuid -import time -import traceback - -HAS_PB_SDK = True - -try: - from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.common.text.converters import to_native - - -LOCATIONS = ['us/las', - 'de/fra', - 'de/fkb'] - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _create_machine(module, profitbricks, datacenter, name): - cores = module.params.get('cores') - ram = module.params.get('ram') - cpu_family = module.params.get('cpu_family') - volume_size = module.params.get('volume_size') - disk_type = module.params.get('disk_type') - image_password = module.params.get('image_password') - ssh_keys = module.params.get('ssh_keys') - bus = module.params.get('bus') - lan = module.params.get('lan') - assign_public_ip = module.params.get('assign_public_ip') - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - location = module.params.get('location') - image = module.params.get('image') - assign_public_ip = module.boolean(module.params.get('assign_public_ip')) - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - if assign_public_ip: - public_found = False - - lans = profitbricks.list_lans(datacenter) - for lan in lans['items']: - if lan['properties']['public']: - public_found = True - lan = lan['id'] - - if not public_found: - i = LAN( - name='public', - public=True) - - lan_response = profitbricks.create_lan(datacenter, i) - _wait_for_completion(profitbricks, lan_response, - wait_timeout, "_create_machine") - 
lan = lan_response['id'] - - v = Volume( - name=str(uuid.uuid4()).replace('-', '')[:10], - size=volume_size, - image=image, - image_password=image_password, - ssh_keys=ssh_keys, - disk_type=disk_type, - bus=bus) - - n = NIC( - lan=int(lan) - ) - - s = Server( - name=name, - ram=ram, - cores=cores, - cpu_family=cpu_family, - create_volumes=[v], - nics=[n], - ) - - try: - create_server_response = profitbricks.create_server( - datacenter_id=datacenter, server=s) - - _wait_for_completion(profitbricks, create_server_response, - wait_timeout, "create_virtual_machine") - - server_response = profitbricks.get_server( - datacenter_id=datacenter, - server_id=create_server_response['id'], - depth=3 - ) - except Exception as e: - module.fail_json(msg="failed to create the new server: %s" % str(e)) - else: - return server_response - - -def _startstop_machine(module, profitbricks, datacenter_id, server_id): - state = module.params.get('state') - - try: - if state == 'running': - profitbricks.start_server(datacenter_id, server_id) - else: - profitbricks.stop_server(datacenter_id, server_id) - - return True - except Exception as e: - module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e))) - - -def _create_datacenter(module, profitbricks): - datacenter = module.params.get('datacenter') - location = module.params.get('location') - wait_timeout = module.params.get('wait_timeout') - - i = Datacenter( - name=datacenter, - location=location - ) - - try: - datacenter_response = profitbricks.create_datacenter(datacenter=i) - - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "_create_datacenter") - - return datacenter_response - except Exception as e: - module.fail_json(msg="failed to create the new server(s): %s" % str(e)) - - -def create_virtual_machine(module, profitbricks): - """ - Create new virtual machine - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object - - Returns: - True if a new virtual machine was created, false otherwise - """ - datacenter = module.params.get('datacenter') - name = module.params.get('name') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - lan = module.params.get('lan') - wait_timeout = module.params.get('wait_timeout') - failed = True - datacenter_found = False - - virtual_machines = [] - virtual_machine_ids = [] - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if datacenter_id: - datacenter_found = True - - if not datacenter_found: - datacenter_response = _create_datacenter(module, profitbricks) - datacenter_id = datacenter_response['id'] - - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "create_virtual_machine") - - if auto_increment: - numbers = set() - count_offset = 1 - - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message, exception=traceback.format_exc()) - - number_range = xrange(count_offset, count_offset + count + len(numbers)) - available_numbers = list(set(number_range).difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] - - # Prefetch a list of servers for later comparison. 
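    # Names that already resolve to a server UUID are skipped below, so
    # re-running the play only provisions the machines that are missing.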
- server_list = profitbricks.list_servers(datacenter_id) - for name in names: - # Skip server creation if the server already exists. - if _get_server_id(server_list, name): - continue - - create_response = _create_machine(module, profitbricks, str(datacenter_id), name) - nics = profitbricks.list_nics(datacenter_id, create_response['id']) - for n in nics['items']: - if lan == n['properties']['lan']: - create_response.update({'public_ip': n['properties']['ips'][0]}) - - virtual_machines.append(create_response) - - failed = False - - results = { - 'failed': failed, - 'machines': virtual_machines, - 'action': 'create', - 'instance_ids': { - 'instances': [i['id'] for i in virtual_machines], - } - } - - return results - - -def remove_virtual_machine(module, profitbricks): - """ - Removes a virtual machine. - - This will remove the virtual machine along with the bootVolume. - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object. - - Not yet supported: handle deletion of attached data disks. - - Returns: - True if a new virtual server was deleted, false otherwise - """ - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') - remove_boot_volume = module.params.get('remove_boot_volume') - changed = False - - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if not datacenter_id: - module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) - - # Prefetch server list for later comparison. - server_list = profitbricks.list_servers(datacenter_id) - for instance in instance_ids: - # Locate UUID for server if referenced by name. - server_id = _get_server_id(server_list, instance) - if server_id: - # Remove the server's boot volume - if remove_boot_volume: - _remove_boot_volume(module, profitbricks, datacenter_id, server_id) - - # Remove the server - try: - server_response = profitbricks.delete_server(datacenter_id, server_id) - except Exception as e: - module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc()) - else: - changed = True - - return changed - - -def _remove_boot_volume(module, profitbricks, datacenter_id, server_id): - """ - Remove the boot volume from the server - """ - try: - server = profitbricks.get_server(datacenter_id, server_id) - volume_id = server['properties']['bootVolume']['id'] - volume_response = profitbricks.delete_volume(datacenter_id, volume_id) - except Exception as e: - module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc()) - - -def startstop_machine(module, profitbricks, state): - """ - Starts or Stops a virtual machine. - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object. - - Returns: - True when the servers process the action successfully, false otherwise. 
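    When wait is true, list_servers is polled every five seconds and each
    server's vmState is compared ('running', or 'shutoff' for the stopped
    state) until all requested instances match or wait_timeout expires.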
- """ - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - changed = False - - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if not datacenter_id: - module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) - - # Prefetch server list for later comparison. - server_list = profitbricks.list_servers(datacenter_id) - for instance in instance_ids: - # Locate UUID of server if referenced by name. - server_id = _get_server_id(server_list, instance) - if server_id: - _startstop_machine(module, profitbricks, datacenter_id, server_id) - changed = True - - if wait: - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - matched_instances = [] - for res in profitbricks.list_servers(datacenter_id)['items']: - if state == 'running': - if res['properties']['vmState'].lower() == state: - matched_instances.append(res) - elif state == 'stopped': - if res['properties']['vmState'].lower() == 'shutoff': - matched_instances.append(res) - - if len(matched_instances) < len(instance_ids): - time.sleep(5) - else: - break - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime()) - - return (changed) - - -def _get_datacenter_id(datacenters, identity): - """ - Fetch and return datacenter UUID by datacenter name if found. - """ - for datacenter in datacenters['items']: - if identity in (datacenter['properties']['name'], datacenter['id']): - return datacenter['id'] - return None - - -def _get_server_id(servers, identity): - """ - Fetch and return server UUID by server name if found. 
- """ - for server in servers['items']: - if identity in (server['properties']['name'], server['id']): - return server['id'] - return None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - name=dict(), - image=dict(), - cores=dict(type='int', default=2), - ram=dict(type='int', default=2048), - cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'], - default='AMD_OPTERON'), - volume_size=dict(type='int', default=10), - disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), - image_password=dict(default=None, no_log=True), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), - lan=dict(type='int', default=1), - count=dict(type='int', default=1), - auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', elements='str', default=[]), - subscription_user=dict(), - subscription_password=dict(no_log=True), - location=dict(choices=LOCATIONS, default='us/las'), - assign_public_ip=dict(type='bool', default=False), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - remove_boot_volume=dict(type='bool', default=True), - state=dict(default='present'), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required ' + - 'for running or stopping machines.') - - try: - (changed) = remove_virtual_machine(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state in ('running', 'stopped'): - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for ' + - 'running or stopping machines.') - try: - (changed) = startstop_machine(module, profitbricks, state) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state == 'present': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - if not module.params.get('image'): - module.fail_json(msg='image parameter is required for new instance') - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is ' + - 'required for new instance') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is ' + - 'required for new instance') - - try: - (machine_dict_array) = create_virtual_machine(module, profitbricks) - module.exit_json(**machine_dict_array) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py deleted file mode 100644 index 7897ffde..00000000 --- 
a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_datacenter -short_description: Create or destroy a ProfitBricks Virtual Datacenter. -description: - - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency - on profitbricks >= 1.0.0 -options: - name: - description: - - The name of the virtual datacenter. - type: str - description: - description: - - The description of the virtual datacenter. - type: str - required: false - location: - description: - - The datacenter location. - type: str - required: false - default: us/las - choices: [ "us/las", "de/fra", "de/fkb" ] - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: false - wait: - description: - - wait for the datacenter to be created before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - Create or terminate datacenters. - - "The available choices are: C(present), C(absent)." - type: str - required: false - default: 'present' - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Create a datacenter - community.general.profitbricks_datacenter: - datacenter: Tardis One - wait_timeout: 500 - -- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter) - community.general.profitbricks_datacenter: - datacenter: Tardis One - wait_timeout: 500 - state: absent -''' - -import re -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, Datacenter -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -LOCATIONS = ['us/las', - 'de/fra', - 'de/fkb'] - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _remove_datacenter(module, profitbricks, datacenter): - try: - profitbricks.delete_datacenter(datacenter) - except Exception as e: - module.fail_json(msg="failed to remove the datacenter: %s" % str(e)) - - -def create_datacenter(module, profitbricks): - """ - Creates a Datacenter - - This will create a new Datacenter in the specified location. 
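    When wait is true the call blocks via _wait_for_completion until the
    provisioning request is DONE; the returned dict carries the new
    datacenter_id.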
- - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if a new datacenter was created, false otherwise - """ - name = module.params.get('name') - location = module.params.get('location') - description = module.params.get('description') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - i = Datacenter( - name=name, - location=location, - description=description - ) - - try: - datacenter_response = profitbricks.create_datacenter(datacenter=i) - - if wait: - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "_create_datacenter") - - results = { - 'datacenter_id': datacenter_response['id'] - } - - return results - - except Exception as e: - module.fail_json(msg="failed to create the new datacenter: %s" % str(e)) - - -def remove_datacenter(module, profitbricks): - """ - Removes a Datacenter. - - This will remove a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the datacenter was deleted, false otherwise - """ - name = module.params.get('name') - changed = False - - if(uuid_match.match(name)): - _remove_datacenter(module, profitbricks, name) - changed = True - else: - datacenters = profitbricks.list_datacenters() - - for d in datacenters['items']: - vdc = profitbricks.get_datacenter(d['id']) - - if name == vdc['properties']['name']: - name = d['id'] - _remove_datacenter(module, profitbricks, name) - changed = True - - return changed - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(), - description=dict(), - location=dict(choices=LOCATIONS, default='us/las'), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=600, type='int'), - state=dict(default='present'), # @TODO add choices - ) - ) - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required deleting a virtual datacenter.') - - try: - (changed) = remove_datacenter(module, profitbricks) - module.exit_json( - changed=changed) - except Exception as e: - module.fail_json(msg='failed to set datacenter state: %s' % str(e)) - - elif state == 'present': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for a new datacenter') - if not module.params.get('location'): - module.fail_json(msg='location parameter is required for a new datacenter') - - try: - (datacenter_dict_array) = create_datacenter(module, profitbricks) - module.exit_json(**datacenter_dict_array) - except Exception as e: - module.fail_json(msg='failed to set datacenter state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py 
b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py deleted file mode 100644 index 5d98e05e..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py +++ /dev/null @@ -1,289 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_nic -short_description: Create or Remove a NIC. -description: - - This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0 -options: - datacenter: - description: - - The datacenter in which to operate. - type: str - required: true - server: - description: - - The server name or ID. - type: str - required: true - name: - description: - - The name or ID of the NIC. This is only required on deletes, but not on create. - - If not specified, it defaults to a value based on UUID4. - type: str - lan: - description: - - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create. - type: str - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: true - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: true - wait: - description: - - wait for the operation to complete before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - Indicate desired state of the resource - - "The available choices are: C(present), C(absent)." - type: str - required: false - default: 'present' - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Create a NIC - community.general.profitbricks_nic: - datacenter: Tardis One - server: node002 - lan: 2 - wait_timeout: 500 - state: present - -- name: Remove a NIC - community.general.profitbricks_nic: - datacenter: Tardis One - server: node002 - name: 7341c2454f - wait_timeout: 500 - state: absent -''' - -import re -import uuid -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, NIC -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _make_default_name(): - return str(uuid.uuid4()).replace('-', '')[:10] - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def create_nic(module, profitbricks): - """ - Creates a NIC. 
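    If no name is supplied, a short default derived from uuid4 is
    generated via _make_default_name.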
- - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the nic creates, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - lan = module.params.get('lan') - name = module.params.get('name') - if name is None: - name = _make_default_name() - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - try: - n = NIC( - name=name, - lan=lan - ) - - nic_response = profitbricks.create_nic(datacenter, server, n) - - if wait: - _wait_for_completion(profitbricks, nic_response, - wait_timeout, "create_nic") - - return nic_response - - except Exception as e: - module.fail_json(msg="failed to create the NIC: %s" % str(e)) - - -def delete_nic(module, profitbricks): - """ - Removes a NIC - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the NIC was removed, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - name = module.params.get('name') - if name is None: - name = _make_default_name() - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - server_found = False - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server_found = True - server = s['id'] - break - - if not server_found: - return False - - # Locate UUID for NIC - nic_found = False - if not (uuid_match.match(name)): - nic_list = profitbricks.list_nics(datacenter, server) - for n in nic_list['items']: - if name == n['properties']['name']: - nic_found = True - name = n['id'] - break - - if not nic_found: - return False - - try: - nic_response = profitbricks.delete_nic(datacenter, server, name) - return nic_response - except Exception as e: - module.fail_json(msg="failed to remove the NIC: %s" % str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(required=True), - server=dict(required=True), - name=dict(), - lan=dict(), - subscription_user=dict(required=True), - subscription_password=dict(required=True, no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ), - required_if=( - ('state', 'absent', ['name']), - ('state', 'present', ['lan']), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 
'absent': - try: - (changed) = delete_nic(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set nic state: %s' % str(e)) - - elif state == 'present': - try: - (nic_dict) = create_nic(module, profitbricks) - module.exit_json(nics=nic_dict) # @FIXME changed not calculated? - except Exception as e: - module.fail_json(msg='failed to set nic state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py deleted file mode 100644 index be1c18b5..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py +++ /dev/null @@ -1,432 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_volume -short_description: Create or destroy a volume. -description: - - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0 -options: - datacenter: - description: - - The datacenter in which to create the volumes. - type: str - name: - description: - - The name of the volumes. You can enumerate the names using auto_increment. - type: str - size: - description: - - The size of the volume. - type: int - required: false - default: 10 - bus: - description: - - The bus type. - type: str - required: false - default: VIRTIO - choices: [ "IDE", "VIRTIO"] - image: - description: - - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID. - type: str - image_password: - description: - - Password set for the administrative user. - type: str - required: false - ssh_keys: - description: - - Public SSH keys allowing access to the virtual machine. - type: list - elements: str - required: false - disk_type: - description: - - The disk type of the volume. - type: str - required: false - default: HDD - choices: [ "HDD", "SSD" ] - licence_type: - description: - - The licence type for the volume. This is used when the image is non-standard. - - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)." - type: str - required: false - default: UNKNOWN - count: - description: - - The number of volumes you wish to create. - type: int - required: false - default: 1 - auto_increment: - description: - - Whether or not to increment a single number in the name for created virtual machines. - default: yes - type: bool - instance_ids: - description: - - list of instance ids, currently only used when state='absent' to remove instances. - type: list - elements: str - required: false - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. 
- type: str - required: false - wait: - description: - - wait for the datacenter to be created before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - create or terminate datacenters - - "The available choices are: C(present), C(absent)." - type: str - required: false - default: 'present' - server: - description: - - Server name to attach the volume to. - type: str - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Create multiple volumes - community.general.profitbricks_volume: - datacenter: Tardis One - name: vol%02d - count: 5 - auto_increment: yes - wait_timeout: 500 - state: present - -- name: Remove Volumes - community.general.profitbricks_volume: - datacenter: Tardis One - instance_ids: - - 'vol01' - - 'vol02' - wait_timeout: 500 - state: absent -''' - -import re -import time -import traceback - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, Volume -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.common.text.converters import to_native - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _create_volume(module, profitbricks, datacenter, name): - size = module.params.get('size') - bus = module.params.get('bus') - image = module.params.get('image') - image_password = module.params.get('image_password') - ssh_keys = module.params.get('ssh_keys') - disk_type = module.params.get('disk_type') - licence_type = module.params.get('licence_type') - wait_timeout = module.params.get('wait_timeout') - wait = module.params.get('wait') - - try: - v = Volume( - name=name, - size=size, - bus=bus, - image=image, - image_password=image_password, - ssh_keys=ssh_keys, - disk_type=disk_type, - licence_type=licence_type - ) - - volume_response = profitbricks.create_volume(datacenter, v) - - if wait: - _wait_for_completion(profitbricks, volume_response, - wait_timeout, "_create_volume") - - except Exception as e: - module.fail_json(msg="failed to create the volume: %s" % str(e)) - - return volume_response - - -def _delete_volume(module, profitbricks, datacenter, volume): - try: - profitbricks.delete_volume(datacenter, volume) - except Exception as e: - module.fail_json(msg="failed to remove the volume: %s" % str(e)) - - -def create_volume(module, profitbricks): - """ - Creates a volume. - - This will create a volume in a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
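    Names are expanded with the same auto_increment logic as the server
    module; each created volume is then passed to _attach_volume, which
    attaches it when the server parameter is set.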
- - Returns: - True if the volume was created, false otherwise - """ - datacenter = module.params.get('datacenter') - name = module.params.get('name') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - - datacenter_found = False - failed = True - volumes = [] - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - datacenter_found = True - break - - if not datacenter_found: - module.fail_json(msg='datacenter could not be found.') - - if auto_increment: - numbers = set() - count_offset = 1 - - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message, exception=traceback.format_exc()) - - number_range = xrange(count_offset, count_offset + count + len(numbers)) - available_numbers = list(set(number_range).difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * count - - for name in names: - create_response = _create_volume(module, profitbricks, str(datacenter), name) - volumes.append(create_response) - _attach_volume(module, profitbricks, datacenter, create_response['id']) - failed = False - - results = { - 'failed': failed, - 'volumes': volumes, - 'action': 'create', - 'instance_ids': { - 'instances': [i['id'] for i in volumes], - } - } - - return results - - -def delete_volume(module, profitbricks): - """ - Removes a volume. - - This will create a volume in a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was removed, false otherwise - """ - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - datacenter = module.params.get('datacenter') - changed = False - instance_ids = module.params.get('instance_ids') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - for n in instance_ids: - if(uuid_match.match(n)): - _delete_volume(module, profitbricks, datacenter, n) - changed = True - else: - volumes = profitbricks.list_volumes(datacenter) - for v in volumes['items']: - if n == v['properties']['name']: - volume_id = v['id'] - _delete_volume(module, profitbricks, datacenter, volume_id) - changed = True - - return changed - - -def _attach_volume(module, profitbricks, datacenter, volume): - """ - Attaches a volume. - - This will attach a volume to the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
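    The server parameter accepts either a name or a UUID; a name is
    resolved by listing the datacenter's servers first.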
- - Returns: - True if the volume was attached, false otherwise - """ - server = module.params.get('server') - - # Locate UUID for Server - if server: - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - try: - return profitbricks.attach_volume(datacenter, server, volume) - except Exception as e: - module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc()) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - server=dict(), - name=dict(), - size=dict(type='int', default=10), - bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), - image=dict(), - image_password=dict(no_log=True), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), - licence_type=dict(default='UNKNOWN'), - count=dict(type='int', default=1), - auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', elements='str', default=[]), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ) - ) - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for running or stopping machines.') - - try: - (changed) = delete_volume(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state == 'present': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for new instance') - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - - try: - (volume_dict_array) = create_volume(module, profitbricks) - module.exit_json(**volume_dict_array) - except Exception as e: - module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py deleted file mode 100644 index 1fb3f3c0..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_volume_attachments -short_description: Attach or detach a 
volume. -description: - - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0 -options: - datacenter: - description: - - The datacenter in which to operate. - type: str - server: - description: - - The name of the server you wish to detach or attach the volume. - type: str - volume: - description: - - The volume name or ID. - type: str - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: false - wait: - description: - - wait for the operation to complete before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - Indicate desired state of the resource - - "The available choices are: C(present), C(absent)." - type: str - required: false - default: 'present' - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Attach a volume - community.general.profitbricks_volume_attachments: - datacenter: Tardis One - server: node002 - volume: vol01 - wait_timeout: 500 - state: present - -- name: Detach a volume - community.general.profitbricks_volume_attachments: - datacenter: Tardis One - server: node002 - volume: vol01 - wait_timeout: 500 - state: absent -''' - -import re -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def attach_volume(module, profitbricks): - """ - Attaches a volume. - - This will attach a volume to the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
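    datacenter, server and volume may each be given as a name or a UUID;
    names are resolved to UUIDs before the attach call is made.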
- - Returns: - True if the volume was attached, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - volume = module.params.get('volume') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - # Locate UUID for Volume - if not (uuid_match.match(volume)): - volume_list = profitbricks.list_volumes(datacenter) - for v in volume_list['items']: - if volume == v['properties']['name']: - volume = v['id'] - break - - return profitbricks.attach_volume(datacenter, server, volume) - - -def detach_volume(module, profitbricks): - """ - Detaches a volume. - - This will remove a volume from the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was detached, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - volume = module.params.get('volume') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - # Locate UUID for Volume - if not (uuid_match.match(volume)): - volume_list = profitbricks.list_volumes(datacenter) - for v in volume_list['items']: - if volume == v['properties']['name']: - volume = v['id'] - break - - return profitbricks.detach_volume(datacenter, server, volume) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - server=dict(), - volume=dict(), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required') - if not module.params.get('server'): - module.fail_json(msg='server parameter is required') - if not module.params.get('volume'): - module.fail_json(msg='volume parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - try: - (changed) = detach_volume(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed 
to set volume_attach state: %s' % str(e)) - elif state == 'present': - try: - attach_volume(module, profitbricks) - module.exit_json() - except Exception as e: - module.fail_json(msg='failed to set volume_attach state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py b/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py deleted file mode 100644 index d3b76337..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py +++ /dev/null @@ -1,628 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# PubNub Real-time Cloud-Hosted Push API and Push Notification Client -# Frameworks -# Copyright (C) 2016 PubNub Inc. -# http://www.pubnub.com/ -# http://www.pubnub.com/terms -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pubnub_blocks -short_description: PubNub blocks management module. -description: - - "This module allows Ansible to interface with the PubNub BLOCKS - infrastructure by providing the following operations: create / remove, - start / stop and rename for blocks and create / modify / remove for event - handlers" -author: - - PubNub (@pubnub) - - Sergey Mamontov (@parfeon) -requirements: - - "python >= 2.7" - - "pubnub_blocks_client >= 1.0" -options: - email: - description: - - Email from account for which new session should be started. - - "Not required if C(cache) contains result of previous module call (in - same play)." - required: false - type: str - password: - description: - - Password which match to account to which specified C(email) belong. - - "Not required if C(cache) contains result of previous module call (in - same play)." - required: false - type: str - cache: - description: > - In case if single play use blocks management module few times it is - preferred to enabled 'caching' by making previous module to share - gathered artifacts and pass them to this parameter. - required: false - type: dict - default: {} - account: - description: - - "Name of PubNub account for from which C(application) will be used to - manage blocks." - - "User's account will be used if value not set or empty." - type: str - required: false - application: - description: - - "Name of target PubNub application for which blocks configuration on - specific C(keyset) will be done." - type: str - required: true - keyset: - description: - - Name of application's keys set which is bound to managed blocks. - type: str - required: true - state: - description: - - "Intended block state after event handlers creation / update process - will be completed." - required: false - default: 'present' - choices: ['started', 'stopped', 'present', 'absent'] - type: str - name: - description: - - Name of managed block which will be later visible on admin.pubnub.com. - required: true - type: str - description: - description: - - Short block description which will be later visible on - admin.pubnub.com. Used only if block doesn't exists and won't change - description for existing block. - required: false - type: str - event_handlers: - description: - - "List of event handlers which should be updated for specified block - C(name)." - - "Each entry for new event handler should contain: C(name), C(src), - C(channels), C(event). 
C(name) is used as the event handler name and can be - used later to make changes to it." - - C(src) is the full path to the file with the event handler code. - - "C(channels) is the name of the channel on which the event handler - listens for events." - - "C(event) is the type of event which triggers the event handler: - I(js-before-publish), I(js-after-publish), I(js-after-presence)." - - "Each entry for an existing handler should contain C(name) (so the - target handler can be identified). The remaining parameters (C(src), - C(channels) and C(event)) need only be added if they should be changed." - - "An event handler can be renamed by adding a C(changes) key to the - event handler payload and passing a dictionary with the single key - C(name), which holds the new name." - - "To remove a particular event handler, set its C(state) to C(absent)." - required: false - default: [] - type: list - elements: dict - changes: - description: - - "List of fields which should be changed on the block itself (doesn't - affect any event handlers)." - - "The possible option for change is: C(name)." - required: false - default: {} - type: dict - validate_certs: - description: - - "This key allows skipping the certificate check when performing REST API - calls. Sometimes a host may have certificate issues and this - will cause problems when calling the PubNub REST API." - - If the check should be ignored, pass C(False) to this parameter. - required: false - default: true - type: bool -''' - -EXAMPLES = ''' -# Event handler create example. -- name: Create single event handler - community.general.pubnub_blocks: - email: '{{ email }}' - password: '{{ password }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - event_handlers: - - - src: '{{ path_to_handler_source }}' - name: '{{ handler_name }}' - event: 'js-before-publish' - channels: '{{ handler_channel }}' - -# Change event handler trigger event type. -- name: Change event handler 'event' - community.general.pubnub_blocks: - email: '{{ email }}' - password: '{{ password }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - event_handlers: - - - name: '{{ handler_name }}' - event: 'js-after-publish'
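# Rename one event handler and remove another via 'changes' / 'state'
# (an illustrative sketch based on the option documentation above;
# all '{{ ... }}' variable names are placeholders).
- name: Rename and remove event handlers
  community.general.pubnub_blocks:
    email: '{{ email }}'
    password: '{{ password }}'
    application: '{{ app_name }}'
    keyset: '{{ keyset_name }}'
    name: '{{ block_name }}'
    event_handlers:
      -
        name: '{{ handler_name }}'
        changes:
          name: '{{ new_handler_name }}'
      -
        name: '{{ obsolete_handler_name }}'
        state: 'absent'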
- -# Stop block and event handlers. -- name: Stopping block - community.general.pubnub_blocks: - email: '{{ email }}' - password: '{{ password }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - state: stopped - -# Multiple module calls with cached result passing -- name: Create '{{ block_name }}' block - register: module_cache - community.general.pubnub_blocks: - email: '{{ email }}' - password: '{{ password }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - state: present -- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}' - register: module_cache - community.general.pubnub_blocks: - cache: '{{ module_cache }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - state: present - event_handlers: - - - src: '{{ path_to_handler_1_source }}' - name: '{{ event_handler_1_name }}' - channels: '{{ event_handler_1_channel }}' - event: 'js-before-publish' -- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}' - register: module_cache - community.general.pubnub_blocks: - cache: '{{ module_cache }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - state: present - event_handlers: - - - src: '{{ path_to_handler_2_source }}' - name: '{{ event_handler_2_name }}' - channels: '{{ event_handler_2_channel }}' - event: 'js-before-publish' -- name: Start '{{ block_name }}' block - register: module_cache - community.general.pubnub_blocks: - cache: '{{ module_cache }}' - application: '{{ app_name }}' - keyset: '{{ keyset_name }}' - name: '{{ block_name }}' - state: started -''' - -RETURN = ''' -module_cache: - description: "Cached account information. If the module is used several - times within a single play, it is better to pass the cached data to the - next module calls to speed up the process." - type: dict - returned: always -''' -import copy -import os - -try: - # Import PubNub BLOCKS client. - from pubnub_blocks_client import User, Account, Owner, Application, Keyset - from pubnub_blocks_client import Block, EventHandler - from pubnub_blocks_client import exceptions - HAS_PUBNUB_BLOCKS_CLIENT = True -except ImportError: - HAS_PUBNUB_BLOCKS_CLIENT = False - User = None - Account = None - Owner = None - Application = None - Keyset = None - Block = None - EventHandler = None - exceptions = None - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text - - -def pubnub_user(module): - """Create and configure user model if possible. - - :type module: AnsibleModule - :param module: Reference on module which contains module launch - information and status report methods. - - :rtype: User - :return: Reference on initialized and ready to use user or 'None' in - case not all required information has been passed to the module. - """ - user = None - params = module.params - - if params.get('cache') and params['cache'].get('module_cache'): - cache = params['cache']['module_cache'] - user = User() - user.restore(cache=copy.deepcopy(cache['pnm_user'])) - elif params.get('email') and params.get('password'): - user = User(email=params.get('email'), password=params.get('password')) - else: - err_msg = 'It looks like no account credentials have been passed, or ' \ - 'the \'cache\' field doesn\'t contain the result of a ' \ - 'previous module call.'
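# Fail early: every later step (account, application, keyset, block) requires an authorized user.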
- module.fail_json(msg='Missing account credentials.', - description=err_msg, changed=False) - - return user - - -def pubnub_account(module, user): - """Create and configure account if possible. - - :type module: AnsibleModule - :param module: Reference on module which contains module launch - information and status report methods. - :type user: User - :param user: Reference on authorized user whose account should be - used during manipulations with the block. - - :rtype: Account - :return: Reference on initialized and ready to use account or 'None' in - case not all required information has been passed to the module. - """ - params = module.params - if params.get('account'): - account_name = params.get('account') - account = user.account(name=params.get('account')) - if account is None: - err_frmt = 'It looks like there is no \'{0}\' account for the ' \ - 'authorized user. Please make sure that the correct ' \ - 'name has been passed during module configuration.' - module.fail_json(msg='Missing account.', - description=err_frmt.format(account_name), - changed=False) - else: - account = user.accounts()[0] - - return account - - -def pubnub_application(module, account): - """Retrieve reference on target application from account model. - - NOTE: If account authorization fails or there is no application with - the specified name, the module will exit with an error. - :type module: AnsibleModule - :param module: Reference on module which contains module launch - information and status report methods. - :type account: Account - :param account: Reference on PubNub account model from which reference - on application should be fetched. - - :rtype: Application - :return: Reference on initialized and ready to use application model. - """ - application = None - params = module.params - try: - application = account.application(params['application']) - except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc: - exc_msg = _failure_title_from_exception(exc) - exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] - module.fail_json(msg=exc_msg, description=exc_descr, - changed=account.changed, - module_cache=dict(account)) - - if application is None: - err_fmt = 'There is no \'{0}\' application for {1}. Make sure that ' \ - 'the correct application name has been passed. If the ' \ - 'application doesn\'t exist you can create it on admin.pubnub.com.' - email = account.owner.email - module.fail_json(msg=err_fmt.format(params['application'], email), - changed=account.changed, module_cache=dict(account)) - - return application - - -def pubnub_keyset(module, account, application): - """Retrieve reference on target keyset from application model. - - NOTE: If there is no keyset with the specified name, the module will - exit with an error. - :type module: AnsibleModule - :param module: Reference on module which contains module launch - information and status report methods. - :type account: Account - :param account: Reference on PubNub account model which will be - used in case of error to export cached data. - :type application: Application - :param application: Reference on PubNub application model from which - reference on keyset should be fetched. - - :rtype: Keyset - :return: Reference on initialized and ready to use keyset model. - """ - params = module.params - keyset = application.keyset(params['keyset']) - if keyset is None: - err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \ - 'sure that the correct keyset name has been passed. If the keyset ' \ - 'doesn\'t exist you can create it on admin.pubnub.com.' - module.fail_json(msg=err_fmt.format(params['keyset'], - application.name), - changed=account.changed, module_cache=dict(account)) - - return keyset - - -def pubnub_block(module, account, keyset): - """Retrieve reference on target block from keyset model. - - NOTE: If there is no block with the specified name and the module is - configured to start/stop it, the module will exit with an error. - :type module: AnsibleModule - :param module: Reference on module which contains module launch - information and status report methods. - :type account: Account - :param account: Reference on PubNub account model which will be used in - case of error to export cached data. - :type keyset: Keyset - :param keyset: Reference on keyset model from which reference on block - should be fetched. - - :rtype: Block - :return: Reference on initialized and ready to use block model. - """ - block = None - params = module.params - try: - block = keyset.block(params['name']) - except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc: - exc_msg = _failure_title_from_exception(exc) - exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] - module.fail_json(msg=exc_msg, description=exc_descr, - changed=account.changed, module_cache=dict(account)) - - # Report an error because the block doesn't exist but was requested - # to start/stop. - if block is None and params['state'] in ['started', 'stopped']: - block_name = params.get('name') - module.fail_json(msg="'{0}' block doesn't exist.".format(block_name), - changed=account.changed, module_cache=dict(account)) - - if block is None and params['state'] == 'present': - block = Block(name=params.get('name'), - description=params.get('description')) - keyset.add_block(block) - - if block: - # Update block information if required. - if params.get('changes') and params['changes'].get('name'): - block.name = params['changes']['name'] - if params.get('description'): - block.description = params.get('description') - - return block - - -def pubnub_event_handler(block, data): - """Retrieve reference on target event handler from block model. - - :type block: Block - :param block: Reference on block model from which reference on event - handlers should be fetched. - :type data: dict - :param data: Reference on dictionary which contains information about - the event handler and whether it should be created or not. - - :rtype: EventHandler - :return: Reference on initialized and ready to use event handler model. - 'None' will be returned if there is no handler with the - specified name and no request to create it. - """ - event_handler = block.event_handler(data['name']) - - # Prepare payload for event handler update. - changed_name = (data.pop('changes').get('name') - if 'changes' in data else None) - name = data.get('name') or changed_name - channels = data.get('channels') - event = data.get('event') - code = _content_of_file_at_path(data.get('src')) - state = data.get('state') or 'present' - - # Create event handler if required. - if event_handler is None and state == 'present': - event_handler = EventHandler(name=name, channels=channels, event=event, - code=code) - block.add_event_handler(event_handler) - - # Update event handler if required. 
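# Only fields that were explicitly provided are overwritten below; omitted fields keep the handler's current values.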
- if event_handler is not None and state == 'present': - if name is not None: - event_handler.name = name - if channels is not None: - event_handler.channels = channels - if event is not None: - event_handler.event = event - if code is not None: - event_handler.code = code - - return event_handler - - -def _failure_title_from_exception(exception): - """Compose human-readable title for a module error. - - The title will be based on status codes if they have been provided. - :type exception: exceptions.GeneralPubNubError - :param exception: Reference on exception for which title should be - composed. - - :rtype: str - :return: Reference on error title which should be shown on module - failure. - """ - title = 'General REST API access error.' - if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS: - title = 'Authorization error: missing credentials.' - elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS: - title = 'Authorization error: wrong credentials.' - elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS: - title = 'API access error: insufficient access rights.' - elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED: - title = 'API access error: time token expired.' - elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS: - title = 'Block creation failed: a block with the same name already exists.' - elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL: - title = 'Unable to fetch the list of blocks for the keyset.' - elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL: - title = 'Block creation failed.' - elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL: - title = 'Block update failed.' - elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL: - title = 'Block removal failed.' - elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL: - title = 'Block start/stop failed.' - elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS: - title = 'Event handler creation failed: missing fields.' - elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS: - title = 'Event handler creation failed: a handler with the same name already exists.' - elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL: - title = 'Event handler creation failed.' - elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL: - title = 'Event handler update failed.' - elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL: - title = 'Event handler removal failed.' - - return title - - -def _content_of_file_at_path(path): - """Read file content. - - Try to read the content of the file at the specified path. - :type path: str - :param path: Full path to location of file which should be read. 
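NOTE: 'None' is returned when the file is missing or its content cannot be decoded as text.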
- :rtype: content - :return: File content or 'None' - """ - content = None - if path and os.path.exists(path): - with open(path, mode="rt") as opened_file: - b_content = opened_file.read() - try: - content = to_text(b_content, errors='surrogate_or_strict') - except UnicodeError: - pass - - return content - - -def main(): - fields = dict( - email=dict(default='', required=False, type='str'), - password=dict(default='', required=False, type='str', no_log=True), - account=dict(default='', required=False, type='str'), - application=dict(required=True, type='str'), - keyset=dict(required=True, type='str', no_log=False), - state=dict(default='present', type='str', - choices=['started', 'stopped', 'present', 'absent']), - name=dict(required=True, type='str'), description=dict(type='str'), - event_handlers=dict(default=list(), type='list', elements='dict'), - changes=dict(default=dict(), type='dict'), - cache=dict(default=dict(), type='dict'), - validate_certs=dict(default=True, type='bool')) - module = AnsibleModule(argument_spec=fields, supports_check_mode=True) - - if not HAS_PUBNUB_BLOCKS_CLIENT: - module.fail_json(msg='pubnub_blocks_client required for this module.') - - params = module.params - - # Authorize user. - user = pubnub_user(module) - # Initialize PubNub account instance. - account = pubnub_account(module, user=user) - # Try fetch application with which module should work. - application = pubnub_application(module, account=account) - # Try fetch keyset with which module should work. - keyset = pubnub_keyset(module, account=account, application=application) - # Try fetch block with which module should work. - block = pubnub_block(module, account=account, keyset=keyset) - is_new_block = block is not None and block.uid == -1 - - # Check whether block should be removed or not. - if block is not None and params['state'] == 'absent': - keyset.remove_block(block) - block = None - - if block is not None: - # Update block information if required. - if params.get('changes') and params['changes'].get('name'): - block.name = params['changes']['name'] - - # Process event changes to event handlers. - for event_handler_data in params.get('event_handlers') or list(): - state = event_handler_data.get('state') or 'present' - event_handler = pubnub_event_handler(data=event_handler_data, - block=block) - if state == 'absent' and event_handler: - block.delete_event_handler(event_handler) - - # Update block operation state if required. - if block and not is_new_block: - if params['state'] == 'started': - block.start() - elif params['state'] == 'stopped': - block.stop() - - # Save current account state. - if not module.check_mode: - try: - account.save() - except (exceptions.APIAccessError, exceptions.KeysetError, - exceptions.BlockError, exceptions.EventHandlerError, - exceptions.GeneralPubNubError) as exc: - module_cache = dict(account) - module_cache.update(dict(pnm_user=dict(user))) - exc_msg = _failure_title_from_exception(exc) - exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] - module.fail_json(msg=exc_msg, description=exc_descr, - changed=account.changed, - module_cache=module_cache) - - # Report module execution results. 
- module_cache = dict(account) - module_cache.update(dict(pnm_user=dict(user))) - changed_will_change = account.changed or account.will_change - module.exit_json(changed=changed_will_change, module_cache=module_cache) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py deleted file mode 100644 index 8c452d9d..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py +++ /dev/null @@ -1,892 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax -short_description: create / delete an instance in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud instance and optionally - waits for it to be 'running'. -options: - auto_increment: - description: - - Whether or not to increment a single number with the name of the - created servers. Only applicable when used with the I(group) attribute - or meta key. - type: bool - default: 'yes' - boot_from_volume: - description: - - Whether or not to boot the instance from a Cloud Block Storage volume. - If C(yes) and I(image) is specified a new volume will be created at - boot time. I(boot_volume_size) is required with I(image) to create a - new volume at boot time. - type: bool - default: 'no' - boot_volume: - type: str - description: - - Cloud Block Storage ID or Name to use as the boot volume of the - instance - boot_volume_size: - type: int - description: - - Size of the volume to create in Gigabytes. This is only required with - I(image) and I(boot_from_volume). - default: 100 - boot_volume_terminate: - description: - - Whether the I(boot_volume) or newly created volume from I(image) will - be terminated when the server is terminated - type: bool - default: 'no' - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - type: bool - default: 'no' - count: - type: int - description: - - number of instances to launch - default: 1 - count_offset: - type: int - description: - - number count to start at - default: 1 - disk_config: - type: str - description: - - Disk partitioning strategy - - If not specified it will assume the value C(auto). - choices: - - auto - - manual - exact_count: - description: - - Explicitly ensure an exact count of instances, used with - state=active/present. If specified as C(yes) and I(count) is less than - the servers matched, servers will be deleted to match the count. If - the number of matched servers is fewer than specified in I(count) - additional servers will be added. - type: bool - default: 'no' - extra_client_args: - type: dict - description: - - A hash of key/value pairs to be used when creating the cloudservers - client. This is considered an advanced option, use it wisely and - with caution. - extra_create_args: - type: dict - description: - - A hash of key/value pairs to be used when creating a new server. - This is considered an advanced option, use it wisely and with caution. - files: - type: dict - description: - - Files to insert into the instance. 
remotefilename:localcontent - flavor: - type: str - description: - - flavor to use for the instance - group: - type: str - description: - - host group to assign to server, is also used for idempotent operations - to ensure a specific number of instances - image: - type: str - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name). - With I(boot_from_volume), a Cloud Block Storage volume will be created - with this image - instance_ids: - type: list - elements: str - description: - - list of instance ids, currently only used when state='absent' to - remove instances - key_name: - type: str - description: - - key pair to use on the instance - aliases: - - keypair - meta: - type: dict - description: - - A hash of metadata to associate with the instance - name: - type: str - description: - - Name to give the instance - networks: - type: list - elements: str - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - type: str - description: - - Data to be uploaded to the servers config drive. This option implies - I(config_drive). Can be a file path or a string - wait: - description: - - wait for the instance to be in state 'running' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Jesse Keating (@omgjlk)" - - "Matt Martz (@sivel)" -notes: - - I(exact_count) can be "destructive" if the number of running servers in - the I(group) is larger than that specified in I(count). In such a case, the - I(state) is effectively set to C(absent) and the extra servers are deleted. - In the case of deletion, the returned data structure will have C(action) - set to C(delete), and the oldest servers in the group will be deleted. 
-extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Cloud Server - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax - credentials: ~/.raxpub - name: rax-test1 - flavor: 5 - image: b11d9567-e412-4255-96b9-bd63ab23bcfe - key_name: my_rackspace_key - files: - /root/test.txt: /home/localuser/test.txt - wait: yes - state: present - networks: - - private - - public - register: rax - -- name: Build an exact count of cloud servers with incremented names - hosts: local - gather_facts: False - tasks: - - name: Server build requests - local_action: - module: rax - credentials: ~/.raxpub - name: test%03d.example.org - flavor: performance1-1 - image: ubuntu-1204-lts-precise-pangolin - state: present - count: 10 - count_offset: 10 - exact_count: yes - group: test - wait: yes - register: rax -''' - -import json -import os -import re -import time - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume, - rax_find_image, rax_find_network, rax_find_volume, - rax_required_together, rax_to_dict, setup_rax_module) -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.six import string_types - - -def rax_find_server_image(module, server, image, boot_volume): - if not image and boot_volume: - vol = rax_find_bootable_volume(module, pyrax, server, - exit=False) - if not vol: - return None - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if vol_image_id: - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if server_image: - server.image = dict(id=server_image) - - # Match image IDs taking care of boot from volume - if image and not server.image: - vol = rax_find_bootable_volume(module, pyrax, server) - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if not vol_image_id: - return None - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if image != server_image: - return None - - server.image = dict(id=server_image) - elif image and server.image['id'] != image: - return None - - return server.image - - -def create(module, names=None, flavor=None, image=None, meta=None, key_name=None, - files=None, wait=True, wait_timeout=300, disk_config=None, - group=None, nics=None, extra_create_args=None, user_data=None, - config_drive=False, existing=None, block_device_mapping_v2=None): - names = [] if names is None else names - meta = {} if meta is None else meta - files = {} if files is None else files - nics = [] if nics is None else nics - extra_create_args = {} if extra_create_args is None else extra_create_args - existing = [] if existing is None else existing - block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2 - - cs = pyrax.cloudservers - changed = False - - if user_data: - config_drive = True - - if user_data and os.path.isfile(os.path.expanduser(user_data)): - try: - user_data = os.path.expanduser(user_data) - f = open(user_data) - user_data = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % user_data) - - # Handle the file contents - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - fileobj = 
open(lpath, 'r') - files[rpath] = fileobj.read() - fileobj.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % lpath) - try: - servers = [] - bdmv2 = block_device_mapping_v2 - for name in names: - servers.append(cs.servers.create(name=name, image=image, - flavor=flavor, meta=meta, - key_name=key_name, - files=files, nics=nics, - disk_config=disk_config, - config_drive=config_drive, - userdata=user_data, - block_device_mapping_v2=bdmv2, - **extra_create_args)) - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - else: - changed = True - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - try: - server.get() - except Exception: - server.status = 'ERROR' - - if not filter(lambda s: s.status not in FINAL_STATUSES, - servers): - break - time.sleep(5) - - success = [] - error = [] - timeout = [] - for server in servers: - try: - server.get() - except Exception: - server.status = 'ERROR' - instance = rax_to_dict(server, 'server') - if server.status == 'ACTIVE' or not wait: - success.append(instance) - elif server.status == 'ERROR': - error.append(instance) - elif wait: - timeout.append(instance) - - untouched = [rax_to_dict(s, 'server') for s in existing] - instances = success + untouched - - results = { - 'changed': changed, - 'action': 'create', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to build' - elif error: - results['msg'] = 'Failed to build all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None): - instance_ids = [] if instance_ids is None else instance_ids - kept = [] if kept is None else kept - - cs = pyrax.cloudservers - - changed = False - instances = {} - servers = [] - - for instance_id in instance_ids: - servers.append(cs.servers.get(instance_id)) - - for server in servers: - try: - server.delete() - except Exception as e: - module.fail_json(msg=e.message) - else: - changed = True - - instance = rax_to_dict(server, 'server') - instances[instance['id']] = instance - - # If requested, wait for server deletion - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - instance_id = server.id - try: - server.get() - except Exception: - instances[instance_id]['status'] = 'DELETED' - instances[instance_id]['rax_status'] = 'DELETED' - - if not filter(lambda s: s['status'] not in ('', 'DELETED', - 'ERROR'), - instances.values()): - break - - time.sleep(5) - - timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'), - instances.values()) - error = filter(lambda s: s['status'] in ('ERROR'), - instances.values()) - success = filter(lambda s: s['status'] in ('', 'DELETED'), - instances.values()) - - instances = [rax_to_dict(s, 'server') for s in kept] - - results = { - 'changed': changed, - 'action': 'delete', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 
'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to delete' - elif error: - results['msg'] = 'Failed to delete all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def cloudservers(module, state=None, name=None, flavor=None, image=None, - meta=None, key_name=None, files=None, wait=True, wait_timeout=300, - disk_config=None, count=1, group=None, instance_ids=None, - exact_count=False, networks=None, count_offset=0, - auto_increment=False, extra_create_args=None, user_data=None, - config_drive=False, boot_from_volume=False, - boot_volume=None, boot_volume_size=None, - boot_volume_terminate=False): - meta = {} if meta is None else meta - files = {} if files is None else files - instance_ids = [] if instance_ids is None else instance_ids - networks = [] if networks is None else networks - extra_create_args = {} if extra_create_args is None else extra_create_args - - cs = pyrax.cloudservers - cnw = pyrax.cloud_networks - if not cnw: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present' or (state == 'absent' and instance_ids is None): - if not boot_from_volume and not boot_volume and not image: - module.fail_json(msg='image is required for the "rax" module') - - for arg, value in dict(name=name, flavor=flavor).items(): - if not value: - module.fail_json(msg='%s is required for the "rax" module' % - arg) - - if boot_from_volume and not image and not boot_volume: - module.fail_json(msg='image or boot_volume are required for the ' - '"rax" with boot_from_volume') - - if boot_from_volume and image and not boot_volume_size: - module.fail_json(msg='boot_volume_size is required for the "rax" ' - 'module with boot_from_volume and image') - - if boot_from_volume and image and boot_volume: - image = None - - servers = [] - - # Add the group meta key - if group and 'group' not in meta: - meta['group'] = group - elif 'group' in meta and group is None: - group = meta['group'] - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - # When using state=absent with group, the absent block won't match the - # names properly. 
Use the exact_count functionality to decrease the count - # to the desired level - was_absent = False - if group is not None and state == 'absent': - exact_count = True - state = 'present' - was_absent = True - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - # act on the state - if state == 'present': - # Idempotent enforcement of a specific count of servers - if exact_count is not False: - # See if we can find servers that match our options - if group is None: - module.fail_json(msg='"group" must be provided when using ' - '"exact_count"') - - if auto_increment: - numbers = set() - - # See if the name is a printf like string, if not append - # %d to the end - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - # regex pattern to match printf formatting - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, count_offset + count) - available_numbers = list(set(number_range) - .difference(numbers)) - else: # Not auto incrementing - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - # available_numbers not needed here, we inspect auto_increment - # again later - - # If state was absent but the count was changed, - # assume we only wanted to remove that number of instances - if was_absent: - diff = len(servers) - count - if diff < 0: - count = 0 - else: - count = diff - - if len(servers) > count: - # We have more servers than we need, set state='absent' - # and delete the extras, this should delete the oldest - state = 'absent' - kept = servers[:count] - del servers[:count] - instance_ids = [] - for server in servers: - instance_ids.append(server.id) - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout, kept=kept) - elif len(servers) < count: - # we have fewer servers than we need - if auto_increment: - # auto incrementing server numbers - names = [] - name_slice = count - len(servers) - numbers_to_use = available_numbers[:name_slice] - for number in numbers_to_use: - names.append(name % number) - else: - # We are not auto incrementing server numbers, - # create a list of 'name' that matches how many we need - names = [name] * (count - len(servers)) - else: - # we have the right number of servers, just return info - # about all of the matched servers - instances = [] - instance_ids = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - instance_ids.append(server.id) - module.exit_json(changed=False, action=None, - instances=instances, - success=[], error=[], timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - else: # not called with exact_count=True - if group is not None: - if auto_increment: - # we are auto incrementing server numbers, but not with - # exact_count - numbers = set() - - # See if the name is a printf like string, if not append - # %d to the end - try: - name % 0 - except TypeError as e:
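# 'name % 0' probes for a printf-style placeholder: a TypeError starting with 'not all' ("not all arguments converted during string formatting") means the name is plain, so '%d' is appended to allow numbering (e.g. 'web%03d' yields web001, web002, ...).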
if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - # regex pattern to match printf formatting - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, - count_offset + count + len(numbers)) - available_numbers = list(set(number_range) - .difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - # Not auto incrementing - names = [name] * count - else: - # No group was specified, and not using exact_count - # Perform more simplistic matching - search_opts = { - 'name': '^%s$' % name, - 'flavor': flavor - } - servers = [] - for server in cs.servers.list(search_opts=search_opts): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - - if not rax_find_server_image(module, server, image, - boot_volume): - continue - - # Ignore servers with non matching metadata - if server.metadata != meta: - continue - servers.append(server) - - if len(servers) >= count: - # We have more servers than were requested, don't do - # anything. Not running with exact_count=True, so we assume - # more is OK - instances = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - - instance_ids = [i['id'] for i in instances] - module.exit_json(changed=False, action=None, - instances=instances, success=[], error=[], - timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - - # We need more servers to reach our target, create names for - # them, we aren't performing auto_increment here - names = [name] * (count - len(servers)) - - block_device_mapping_v2 = [] - if boot_from_volume: - mapping = { - 'boot_index': '0', - 'delete_on_termination': boot_volume_terminate, - 'destination_type': 'volume', - } - if image: - mapping.update({ - 'uuid': image, - 'source_type': 'image', - 'volume_size': boot_volume_size, - }) - image = None - elif boot_volume: - volume = rax_find_volume(module, pyrax, boot_volume) - mapping.update({ - 'uuid': pyrax.utils.get_id(volume), - 'source_type': 'volume', - }) - block_device_mapping_v2.append(mapping) - - create(module, names=names, flavor=flavor, image=image, - meta=meta, key_name=key_name, files=files, wait=wait, - wait_timeout=wait_timeout, disk_config=disk_config, group=group, - nics=nics, extra_create_args=extra_create_args, - user_data=user_data, config_drive=config_drive, - existing=servers, - block_device_mapping_v2=block_device_mapping_v2) - - elif state == 'absent': - if instance_ids is None: - # We weren't given an explicit list of server IDs to delete - # Let's match instead - search_opts = { - 'name': '^%s$' % name, - 'flavor': flavor - } - for server in cs.servers.list(search_opts=search_opts): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - - if not rax_find_server_image(module, server, image, - boot_volume): - continue - - # Ignore servers with non matching metadata - if meta != server.metadata: - continue - - servers.append(server) - - # Build a list of server IDs to delete - instance_ids = [] - for server in servers: - if len(instance_ids) < count: - instance_ids.append(server.id) - else: - break - - if not instance_ids: - # No server IDs 
were matched for deletion, or no IDs were - # explicitly provided, just exit and don't do anything - module.exit_json(changed=False, action=None, instances=[], - success=[], error=[], timeout=[], - instance_ids={'instances': [], - 'success': [], 'error': [], - 'timeout': []}) - - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - auto_increment=dict(default=True, type='bool'), - boot_from_volume=dict(default=False, type='bool'), - boot_volume=dict(type='str'), - boot_volume_size=dict(type='int', default=100), - boot_volume_terminate=dict(type='bool', default=False), - config_drive=dict(default=False, type='bool'), - count=dict(default=1, type='int'), - count_offset=dict(default=1, type='int'), - disk_config=dict(choices=['auto', 'manual']), - exact_count=dict(default=False, type='bool'), - extra_client_args=dict(type='dict', default={}), - extra_create_args=dict(type='dict', default={}), - files=dict(type='dict', default={}), - flavor=dict(), - group=dict(), - image=dict(), - instance_ids=dict(type='list', elements='str'), - key_name=dict(aliases=['keypair']), - meta=dict(type='dict', default={}), - name=dict(), - networks=dict(type='list', elements='str', default=['public', 'private']), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300, type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - auto_increment = module.params.get('auto_increment') - boot_from_volume = module.params.get('boot_from_volume') - boot_volume = module.params.get('boot_volume') - boot_volume_size = module.params.get('boot_volume_size') - boot_volume_terminate = module.params.get('boot_volume_terminate') - config_drive = module.params.get('config_drive') - count = module.params.get('count') - count_offset = module.params.get('count_offset') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - exact_count = module.params.get('exact_count', False) - extra_client_args = module.params.get('extra_client_args') - extra_create_args = module.params.get('extra_create_args') - files = module.params.get('files') - flavor = module.params.get('flavor') - group = module.params.get('group') - image = module.params.get('image') - instance_ids = module.params.get('instance_ids') - key_name = module.params.get('key_name') - meta = module.params.get('meta') - name = module.params.get('name') - networks = module.params.get('networks') - state = module.params.get('state') - user_data = module.params.get('user_data') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - if extra_client_args: - pyrax.cloudservers = pyrax.connect_to_cloudservers( - region=pyrax.cloudservers.client.region_name, - **extra_client_args) - client = pyrax.cloudservers.client - if 'bypass_url' in extra_client_args: - client.management_url = extra_client_args['bypass_url'] - - if pyrax.cloudservers is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloudservers(module, state=state, name=name, flavor=flavor, - image=image, meta=meta, key_name=key_name, files=files, - wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, - count=count, group=group, instance_ids=instance_ids, - exact_count=exact_count, networks=networks, - count_offset=count_offset, auto_increment=auto_increment, - extra_create_args=extra_create_args, user_data=user_data, - config_drive=config_drive, boot_from_volume=boot_from_volume, - boot_volume=boot_volume, boot_volume_size=boot_volume_size, - boot_volume_terminate=boot_volume_terminate) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py deleted file mode 100644 index abfda419..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cbs -short_description: Manipulate Rackspace Cloud Block Storage Volumes -description: - - Manipulate Rackspace Cloud Block Storage Volumes -options: - description: - type: str - description: - - Description to give the volume being created - image: - type: str - description: - - image to use for bootable volumes. Can be an C(id), C(human_id) or - C(name). This option requires C(pyrax>=1.9.3) - meta: - type: dict - description: - - A hash of metadata to associate with the volume - name: - type: str - description: - - Name to give the volume being created - required: true - size: - type: int - description: - - Size of the volume to create in Gigabytes - default: 100 - snapshot_id: - type: str - description: - - The id of the snapshot to create the volume from - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - volume_type: - type: str - description: - - Type of the volume being created - choices: - - SATA - - SSD - default: SATA - wait: - description: - - wait for the volume to be in state 'available' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume create request - local_action: - module: rax_cbs - credentials: ~/.raxpub - name: my-volume - description: My Volume - volume_type: SSD - size: 150 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_volume -''' - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume, - rax_required_together, rax_to_dict, setup_rax_module) - - -def cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout, - image): - changed = False - volume = None - instance = {} - - cbs = pyrax.cloud_blockstorage - - if cbs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if image: - # pyrax<1.9.3 did not have support for specifying an image when - # creating a volume which is required for bootable volumes - if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): - module.fail_json(msg='Creating a bootable volume requires ' - 'pyrax>=1.9.3') - image = rax_find_image(module, pyrax, image) - - volume = rax_find_volume(module, pyrax, name) - - if state == 'present': - if not volume: - kwargs = dict() - if image: - kwargs['image'] = image - try: - volume = cbs.create(name, size=size, volume_type=volume_type, - description=description, - metadata=meta, - snapshot_id=snapshot_id, **kwargs) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_for_build(volume, interval=5, - attempts=attempts) - - volume.get() - instance = rax_to_dict(volume) - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait and volume.status not in VOLUME_STATUS: - result['msg'] = 'Timeout waiting on %s' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if volume: - instance = rax_to_dict(volume) - try: - volume.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - description=dict(type='str'), - image=dict(type='str'), - meta=dict(type='dict', default={}), - name=dict(required=True), - size=dict(type='int', default=100), - snapshot_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - description = module.params.get('description') - image = 
module.params.get('image') - meta = module.params.get('meta') - name = module.params.get('name') - size = module.params.get('size') - snapshot_id = module.params.get('snapshot_id') - state = module.params.get('state') - volume_type = module.params.get('volume_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout, - image) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py deleted file mode 100644 index fd210814..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cbs_attachments -short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments -description: - - Manipulate Rackspace Cloud Block Storage Volume Attachments -options: - device: - type: str - description: - - The device path to attach the volume to, e.g. /dev/xvde. - - Before 2.4 this was a required field. Now it can be left to null to auto assign the device name. - volume: - type: str - description: - - Name or id of the volume to attach/detach - required: true - server: - type: str - description: - - Name or id of the server to attach/detach - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - wait: - description: - - wait for the volume to be in 'in-use'/'available' state before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Attach a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume attach request - local_action: - module: rax_cbs_attachments - credentials: ~/.raxpub - volume: my-volume - server: my-server - device: /dev/xvdd - region: DFW - wait: yes - state: present - register: my_volume -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES, - rax_argument_spec, - rax_find_server, - rax_find_volume, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout): - cbs = pyrax.cloud_blockstorage - cs = pyrax.cloudservers - - if cbs is None or cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - changed = False - instance = {} - - volume = rax_find_volume(module, pyrax, volume) - - if not volume: - module.fail_json(msg='No matching storage volumes were found') - - if state == 'present': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - changed = False - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - else: - try: - volume.attach_to_instance(server, mountpoint=device) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - volume.get() - - for key, value in vars(volume).items(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_until(volume, 'status', 'in-use', - interval=5, attempts=attempts) - - volume.get() - result['volume'] = rax_to_dict(volume) - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - try: - volume.detach() - if wait: - pyrax.utils.wait_until(volume, 'status', 'available', - interval=3, attempts=0, - verbose=False) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - volume.get() - changed = True - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - - result = dict(changed=changed, volume=rax_to_dict(volume)) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - device=dict(required=False), - volume=dict(required=True), - server=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - device = module.params.get('device') - volume = module.params.get('volume') - server = module.params.get('server') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py deleted file mode 100644 index a9c32432..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, 
print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cdb -short_description: create/delete or resize a Rackspace Cloud Databases instance -description: - - creates / deletes or resize a Rackspace Cloud Databases instance - and optionally waits for it to be 'running'. The name option needs to be - unique since it's used to identify the instance. -options: - name: - type: str - description: - - Name of the databases server instance - required: yes - flavor: - type: int - description: - - flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB) - default: 1 - volume: - type: int - description: - - Volume size of the database 1-150GB - default: 2 - cdb_type: - type: str - description: - - type of instance (i.e. MySQL, MariaDB, Percona) - default: MySQL - aliases: ['type'] - cdb_version: - type: str - description: - - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6) - - "The available choices are: C(5.1), C(5.6) and C(10)." - default: '5.6' - aliases: ['version'] - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - wait: - description: - - wait for the instance to be in state 'running' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Cloud Databases - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax_cdb - credentials: ~/.raxpub - region: IAD - name: db-server1 - flavor: 1 - volume: 2 - cdb_type: MySQL - cdb_version: 5.6 - wait: yes - state: present - register: rax_db_server -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_instance(name): - - cdb = pyrax.cloud_databases - instances = cdb.list() - if instances: - for instance in instances: - if instance.name == name: - return instance - return False - - -def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout): - - for arg, value in dict(name=name, flavor=flavor, - volume=volume, type=cdb_type, version=cdb_version - ).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb"' - ' module' % arg) - - if not (volume >= 1 and volume <= 150): - module.fail_json(msg='volume is required to be between 1 and 150') - - cdb = pyrax.cloud_databases - - flavors = [] - for item in cdb.list_flavors(): - flavors.append(item.id) - - if not (flavor in flavors): - module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor)) - - changed = False - - instance = find_instance(name) - - if not instance: - action = 'create' - try: - instance = cdb.create(name=name, flavor=flavor, volume=volume, - type=cdb_type, version=cdb_version) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - else: - action = None - - if instance.volume.size != volume: - action = 'resize' - if instance.volume.size > volume: - module.fail_json(changed=False, action=action, - msg='The new volume size must be larger than ' - 'the current volume size', - 
cdb=rax_to_dict(instance)) - instance.resize_volume(volume) - changed = True - - if int(instance.flavor.id) != flavor: - action = 'resize' - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - instance.resize(flavor) - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - - if wait and instance.status != 'ACTIVE': - module.fail_json(changed=changed, action=action, - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be created' % name) - - module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance)) - - -def delete_instance(module, name, wait, wait_timeout): - - if not name: - module.fail_json(msg='name is required for the "rax_cdb" module') - - changed = False - - instance = find_instance(name) - if not instance: - module.exit_json(changed=False, action='delete') - - try: - instance.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN', - attempts=wait_timeout) - - if wait and instance.status != 'SHUTDOWN': - module.fail_json(changed=changed, action='delete', - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be deleted' % name) - - module.exit_json(changed=changed, action='delete', - cdb=rax_to_dict(instance)) - - -def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout): - - # act on the state - if state == 'present': - save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout) - elif state == 'absent': - delete_instance(module, name, wait, wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(type='str', required=True), - flavor=dict(type='int', default=1), - volume=dict(type='int', default=2), - cdb_type=dict(type='str', default='MySQL', aliases=['type']), - cdb_version=dict(type='str', default='5.6', aliases=['version']), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - flavor = module.params.get('flavor') - volume = module.params.get('volume') - cdb_type = module.params.get('cdb_type') - cdb_version = module.params.get('cdb_version') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py deleted file mode 100644 index 86cd1aac..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - 
-DOCUMENTATION = ''' -module: rax_cdb_database -short_description: 'create / delete a database in the Cloud Databases' -description: - - create / delete a database in the Cloud Databases. -options: - cdb_id: - type: str - description: - - The databases server UUID - required: yes - name: - type: str - description: - - Name to give to the database - required: yes - character_set: - type: str - description: - - Set of symbols and encodings - default: 'utf8' - collate: - type: str - description: - - Set of rules for comparing characters in a character set - default: 'utf8_general_ci' - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a database in Cloud Databases - tasks: - - name: Database build request - local_action: - module: rax_cdb_database - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - name: db1 - state: present - register: rax_db_database -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_database(instance, name): - try: - database = instance.get_database(name) - except Exception: - return False - - return database - - -def save_database(module, cdb_id, name, character_set, collate): - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if not database: - try: - database = instance.create_database(name=name, - character_set=character_set, - collate=collate) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='create', - database=rax_to_dict(database)) - - -def delete_database(module, cdb_id, name): - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if database: - try: - database.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete', - database=rax_to_dict(database)) - - -def rax_cdb_database(module, state, cdb_id, name, character_set, collate): - - # act on the state - if state == 'present': - save_database(module, cdb_id, name, character_set, collate) - elif state == 'absent': - delete_database(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - name=dict(type='str', required=True), - character_set=dict(type='str', default='utf8'), - collate=dict(type='str', default='utf8_general_ci'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cdb_id = module.params.get('cdb_id') - name = module.params.get('name') - character_set = 
module.params.get('character_set') - collate = module.params.get('collate') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_database(module, state, cdb_id, name, character_set, collate) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py deleted file mode 100644 index 674f17c0..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cdb_user -short_description: create / delete a Rackspace Cloud Database -description: - - create / delete a database in the Cloud Databases. -options: - cdb_id: - type: str - description: - - The databases server UUID - required: yes - db_username: - type: str - description: - - Name of the database user - required: yes - db_password: - type: str - description: - - Database user password - required: yes - databases: - type: list - elements: str - description: - - Name of the databases that the user can access - default: [] - host: - type: str - description: - - Specifies the host from which a user is allowed to connect to - the database. Possible values are a string containing an IPv4 address - or "%" to allow connecting from any host - default: '%' - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a user in Cloud Databases - tasks: - - name: User build request - local_action: - module: rax_cdb_user - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - db_username: user1 - db_password: user1 - databases: ['db1'] - state: present - register: rax_db_user -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_user(instance, name): - try: - user = instance.get_user(name) - except Exception: - return False - - return user - - -def save_user(module, cdb_id, name, password, databases, host): - - for arg, value in dict(cdb_id=cdb_id, name=name).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if not user: - action = 'create' - try: - user = instance.create_user(name=name, - password=password, - database_names=databases, - host=host) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - else: - action = 'update' - - if user.host != host: - changed = True - - user.update(password=password, host=host) - - 
former_dbs = set([item.name for item in user.list_user_access()]) - databases = set(databases) - - if databases != former_dbs: - try: - revoke_dbs = [db for db in former_dbs if db not in databases] - user.revoke_user_access(db_names=revoke_dbs) - - new_dbs = [db for db in databases if db not in former_dbs] - user.grant_user_access(db_names=new_dbs) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action=action, user=rax_to_dict(user)) - - -def delete_user(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user"' - ' module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if user: - try: - user.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete') - - -def rax_cdb_user(module, state, cdb_id, name, password, databases, host): - - # act on the state - if state == 'present': - save_user(module, cdb_id, name, password, databases, host) - elif state == 'absent': - delete_user(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - db_username=dict(type='str', required=True), - db_password=dict(type='str', required=True, no_log=True), - databases=dict(type='list', elements='str', default=[]), - host=dict(type='str', default='%'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cdb_id = module.params.get('cdb_id') - name = module.params.get('db_username') - password = module.params.get('db_password') - databases = module.params.get('databases') - host = to_text(module.params.get('host'), errors='surrogate_or_strict') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_user(module, state, cdb_id, name, password, databases, host) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py deleted file mode 100644 index 9160133e..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_clb -short_description: create / delete a load balancer in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud load balancer. 
-options: - algorithm: - type: str - description: - - algorithm for the balancer being created - choices: - - RANDOM - - LEAST_CONNECTIONS - - ROUND_ROBIN - - WEIGHTED_LEAST_CONNECTIONS - - WEIGHTED_ROUND_ROBIN - default: LEAST_CONNECTIONS - meta: - type: dict - description: - - A hash of metadata to associate with the instance - name: - type: str - description: - - Name to give the load balancer - required: yes - port: - type: int - description: - - Port for the balancer being created - default: 80 - protocol: - type: str - description: - - Protocol for the balancer being created - choices: - - DNS_TCP - - DNS_UDP - - FTP - - HTTP - - HTTPS - - IMAPS - - IMAPv4 - - LDAP - - LDAPS - - MYSQL - - POP3 - - POP3S - - SMTP - - TCP - - TCP_CLIENT_FIRST - - UDP - - UDP_STREAM - - SFTP - default: HTTP - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - timeout: - type: int - description: - - timeout for communication between the balancer and the node - default: 30 - type: - type: str - description: - - type of interface for the balancer being created - choices: - - PUBLIC - - SERVICENET - default: PUBLIC - vip_id: - type: str - description: - - Virtual IP ID to use when creating the load balancer for purposes of - sharing an IP with another load balancer of another protocol - wait: - description: - - wait for the balancer to be in state 'running' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Load Balancer - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Balancer create request - local_action: - module: rax_clb - credentials: ~/.raxpub - name: my-lb - port: 8080 - protocol: HTTP - type: SERVICENET - timeout: 30 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_lb -''' - - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS, - CLB_PROTOCOLS, - rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id): - if int(timeout) < 30: - module.fail_json(msg='"timeout" must be greater than or equal to 30') - - changed = False - balancers = [] - - clb = pyrax.cloud_loadbalancers - if not clb: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - balancer_list = clb.list() - while balancer_list: - retrieved = clb.list(marker=balancer_list.pop().id) - balancer_list.extend(retrieved) - if len(retrieved) < 2: - break - - for balancer in balancer_list: - if name != balancer.name and name != balancer.id: - continue - - balancers.append(balancer) - - if len(balancers) > 1: - module.fail_json(msg='Multiple Load Balancers were matched by name, ' - 'try using the Load Balancer ID instead') - - if state == 'present': - if isinstance(meta, dict): - metadata = [dict(key=k, value=v) for k, v in meta.items()] - - if not balancers: - try: - virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)] - balancer = clb.create(name, metadata=metadata, port=port, - algorithm=algorithm, protocol=protocol, - timeout=timeout, virtual_ips=virtual_ips) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - balancer = balancers[0] - setattr(balancer, 'metadata', - [dict(key=k, value=v) for k, v in - balancer.get_metadata().items()]) - atts = { - 'name': name, - 'algorithm': algorithm, - 'port': port, - 'protocol': protocol, - 'timeout': timeout - } - for att, value in atts.items(): - current = getattr(balancer, att) - if current != value: - changed = True - - if changed: - balancer.update(**atts) - - if balancer.metadata != metadata: - balancer.set_metadata(meta) - changed = True - - virtual_ips = [clb.VirtualIP(type=vip_type)] - current_vip_types = set([v.type for v in balancer.virtual_ips]) - vip_types = set([v.type for v in virtual_ips]) - if current_vip_types != vip_types: - module.fail_json(msg='Load balancer Virtual IP type cannot ' - 'be changed') - - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - instance = rax_to_dict(balancer, 'clb') - - result = dict(changed=changed, balancer=instance) - - if balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if balancers: - balancer = balancers[0] - try: - balancer.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - instance = rax_to_dict(balancer, 'clb') - - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_until(balancer, 'status', ('DELETED'), - interval=5, attempts=attempts) - else: - instance = {} - - module.exit_json(changed=changed, balancer=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - algorithm=dict(choices=CLB_ALGORITHMS, - default='LEAST_CONNECTIONS'), - meta=dict(type='dict', default={}), - name=dict(required=True), - port=dict(type='int', default=80), - protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), - state=dict(default='present', choices=['present', 'absent']), - timeout=dict(type='int', default=30), - type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'), - vip_id=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - algorithm = module.params.get('algorithm') - 
meta = module.params.get('meta') - name = module.params.get('name') - port = module.params.get('port') - protocol = module.params.get('protocol') - state = module.params.get('state') - timeout = int(module.params.get('timeout')) - vip_id = module.params.get('vip_id') - vip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py deleted file mode 100644 index 4adcc66f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_clb_nodes -short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer -description: - - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer -options: - address: - type: str - required: false - description: - - IP address or domain name of the node - condition: - type: str - required: false - choices: - - enabled - - disabled - - draining - description: - - Condition for the node, which determines its role within the load - balancer - load_balancer_id: - type: int - required: true - description: - - Load balancer id - node_id: - type: int - required: false - description: - - Node id - port: - type: int - required: false - description: - - Port number of the load balanced service on the node - state: - type: str - required: false - default: "present" - choices: - - present - - absent - description: - - Indicate desired state of the node - type: - type: str - required: false - choices: - - primary - - secondary - description: - - Type of node - wait: - required: false - default: "no" - type: bool - description: - - Wait for the load balancer to become active before returning - wait_timeout: - type: int - required: false - default: 30 - description: - - How long to wait before giving up and returning an error - weight: - type: int - required: false - description: - - Weight of node - virtualenv: - type: path - description: - - Virtualenv to execute this module in -author: "Lukasz Kawczynski (@neuroid)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Add a new node to the load balancer - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - address: 10.2.2.3 - port: 80 - condition: enabled - type: primary - wait: yes - credentials: /path/to/credentials - -- name: Drain connections from a node - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - condition: draining - wait: yes - credentials: /path/to/credentials - -- name: Remove a node from the load balancer - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - state: absent - wait: yes - credentials: /path/to/credentials -''' - -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - 
HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module - - -def _activate_virtualenv(path): - activate_this = os.path.join(path, 'bin', 'activate_this.py') - with open(activate_this) as f: - code = compile(f.read(), activate_this, 'exec') - exec(code) - - -def _get_node(lb, node_id=None, address=None, port=None): - """Return a matching node""" - for node in getattr(lb, 'nodes', []): - match_list = [] - if node_id is not None: - match_list.append(getattr(node, 'id', None) == node_id) - if address is not None: - match_list.append(getattr(node, 'address', None) == address) - if port is not None: - match_list.append(getattr(node, 'port', None) == port) - - if match_list and all(match_list): - return node - - return None - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - condition=dict(choices=['enabled', 'disabled', 'draining']), - load_balancer_id=dict(required=True, type='int'), - node_id=dict(type='int'), - port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - type=dict(choices=['primary', 'secondary']), - virtualenv=dict(type='path'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=30, type='int'), - weight=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params['address'] - condition = (module.params['condition'] and - module.params['condition'].upper()) - load_balancer_id = module.params['load_balancer_id'] - node_id = module.params['node_id'] - port = module.params['port'] - state = module.params['state'] - typ = module.params['type'] and module.params['type'].upper() - virtualenv = module.params['virtualenv'] - wait = module.params['wait'] - wait_timeout = module.params['wait_timeout'] or 1 - weight = module.params['weight'] - - if virtualenv: - try: - _activate_virtualenv(virtualenv) - except IOError as e: - module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( - virtualenv, e)) - - setup_rax_module(module, pyrax) - - if not pyrax.cloud_loadbalancers: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - lb = pyrax.cloud_loadbalancers.get(load_balancer_id) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - - node = _get_node(lb, node_id, address, port) - - result = rax_clb_node_to_dict(node) - - if state == 'absent': - if not node: # Removing a non-existent node - module.exit_json(changed=False, state=state) - try: - lb.delete_node(node) - result = {} - except pyrax.exc.NotFound: - module.exit_json(changed=False, state=state) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - else: # present - if not node: - if node_id: # Updating a non-existent node - msg = 'Node %d not found' % node_id - if lb.nodes: - msg += (' (available nodes: %s)' % - ', '.join([str(x.id) for x in lb.nodes])) - module.fail_json(msg=msg) - else: # Creating a new node - try: - node = pyrax.cloudloadbalancers.Node( - address=address, port=port, condition=condition, - weight=weight, type=typ) - resp, body = lb.add_nodes([node]) - result.update(body['nodes'][0]) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - else: # Updating an existing node - mutable = { - 'condition': condition, - 'type': typ, - 'weight': weight, - } - - for name, value in mutable.items(): - if value is None or value == getattr(node, name): - mutable.pop(name) - - if not mutable: - module.exit_json(changed=False, state=state, node=result) - - try: - # The diff has to be set explicitly to update node's weight and - # type; this should probably be fixed in pyrax - lb.update_node(node, diff=mutable) - result.update(mutable) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - - if wait: - pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, - attempts=wait_timeout) - if lb.status != 'ACTIVE': - module.fail_json( - msg='Load balancer not active after %ds (current status: %s)' % - (wait_timeout, lb.status.lower())) - - kwargs = {'node': result} if result else {} - module.exit_json(changed=True, state=state, **kwargs) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py deleted file mode 100644 index adf37512..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rax_clb_ssl -short_description: Manage SSL termination for a Rackspace Cloud Load Balancer. -description: -- Set up, reconfigure, or remove SSL termination for an existing load balancer. -options: - loadbalancer: - type: str - description: - - Name or ID of the load balancer on which to manage SSL termination. - required: true - state: - type: str - description: - - If set to "present", SSL termination will be added to this load balancer. - - If "absent", SSL termination will be removed instead. - choices: - - present - - absent - default: present - enabled: - description: - - If set to "false", temporarily disable SSL termination without discarding - - existing credentials. 
- default: true - type: bool - private_key: - type: str - description: - - The private SSL key as a string in PEM format. - certificate: - type: str - description: - - The public SSL certificates as a string in PEM format. - intermediate_certificate: - type: str - description: - - One or more intermediate certificate authorities as a string in PEM - - format, concatenated into a single string. - secure_port: - type: int - description: - - The port to listen for secure traffic. - default: 443 - secure_traffic_only: - description: - - If "true", the load balancer will *only* accept secure traffic. - default: false - type: bool - https_redirect: - description: - - If "true", the load balancer will redirect HTTP traffic to HTTPS. - - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL - - termination is also applied or removed. - type: bool - wait: - description: - - Wait for the balancer to be in state "running" before turning. - default: false - type: bool - wait_timeout: - type: int - description: - - How long before "wait" gives up, in seconds. - default: 300 -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Enable SSL termination on a load balancer - community.general.rax_clb_ssl: - loadbalancer: the_loadbalancer - state: present - private_key: "{{ lookup('file', 'credentials/server.key' ) }}" - certificate: "{{ lookup('file', 'credentials/server.crt' ) }}" - intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}" - secure_traffic_only: true - wait: true - -- name: Disable SSL termination - community.general.rax_clb_ssl: - loadbalancer: "{{ registered_lb.balancer.id }}" - state: absent - wait: true -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_find_loadbalancer, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, - certificate, intermediate_certificate, secure_port, - secure_traffic_only, https_redirect, - wait, wait_timeout): - # Validate arguments. - - if state == 'present': - if not private_key: - module.fail_json(msg="private_key must be provided.") - else: - private_key = private_key.strip() - - if not certificate: - module.fail_json(msg="certificate must be provided.") - else: - certificate = certificate.strip() - - attempts = wait_timeout // 5 - - # Locate the load balancer. - - balancer = rax_find_loadbalancer(module, pyrax, loadbalancer) - existing_ssl = balancer.get_ssl_termination() - - changed = False - - if state == 'present': - # Apply or reconfigure SSL termination on the load balancer. - ssl_attrs = dict( - securePort=secure_port, - privatekey=private_key, - certificate=certificate, - intermediateCertificate=intermediate_certificate, - enabled=enabled, - secureTrafficOnly=secure_traffic_only - ) - - needs_change = False - - if existing_ssl: - for ssl_attr, value in ssl_attrs.items(): - if ssl_attr == 'privatekey': - # The private key is not included in get_ssl_termination's - # output (as it shouldn't be). Also, if you're changing the - # private key, you'll also be changing the certificate, - # so we don't lose anything by not checking it. 
- continue - - if value is not None and existing_ssl.get(ssl_attr) != value: - # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr)) - needs_change = True - else: - needs_change = True - - if needs_change: - try: - balancer.add_ssl_termination(**ssl_attrs) - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - elif state == 'absent': - # Remove SSL termination if it's already configured. - if existing_ssl: - try: - balancer.delete_ssl_termination() - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - - if https_redirect is not None and balancer.httpsRedirect != https_redirect: - if changed: - # This wait is unavoidable because load balancers are immutable - # while the SSL termination changes above are being applied. - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - try: - balancer.update(httpsRedirect=https_redirect) - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - - if changed and wait: - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - new_ssl_termination = balancer.get_ssl_termination() - - # Intentionally omit the private key from the module output, so you don't - # accidentally echo it with `ansible-playbook -v` or `debug`, and the - # certificate, which is just long. Convert other attributes to snake_case - # and include https_redirect at the top-level. - if new_ssl_termination: - new_ssl = dict( - enabled=new_ssl_termination['enabled'], - secure_port=new_ssl_termination['securePort'], - secure_traffic_only=new_ssl_termination['secureTrafficOnly'] - ) - else: - new_ssl = None - - result = dict( - changed=changed, - https_redirect=balancer.httpsRedirect, - ssl_termination=new_ssl, - balancer=rax_to_dict(balancer, 'clb') - ) - success = True - - if balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - success = False - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - success = False - - if success: - module.exit_json(**result) - else: - module.fail_json(**result) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update(dict( - loadbalancer=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(type='bool', default=True), - private_key=dict(no_log=True), - certificate=dict(), - intermediate_certificate=dict(), - secure_port=dict(type='int', default=443), - secure_traffic_only=dict(type='bool', default=False), - https_redirect=dict(type='bool'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - )) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module.') - - loadbalancer = module.params.get('loadbalancer') - state = module.params.get('state') - enabled = module.boolean(module.params.get('enabled')) - private_key = module.params.get('private_key') - certificate = module.params.get('certificate') - intermediate_certificate = module.params.get('intermediate_certificate') - secure_port = module.params.get('secure_port') - secure_traffic_only = module.boolean(module.params.get('secure_traffic_only')) - https_redirect = module.boolean(module.params.get('https_redirect')) - wait = 
module.boolean(module.params.get('wait')) - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_load_balancer_ssl( - module, loadbalancer, state, enabled, private_key, certificate, - intermediate_certificate, secure_port, secure_traffic_only, - https_redirect, wait, wait_timeout - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py deleted file mode 100644 index 915e13a9..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_dns -short_description: Manage domains on Rackspace Cloud DNS -description: - - Manage domains on Rackspace Cloud DNS -options: - comment: - type: str - description: - - Brief description of the domain. Maximum length of 160 characters - email: - type: str - description: - - Email address of the domain administrator - name: - type: str - description: - - Domain name to create - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - type: int - description: - - Time to live of domain in seconds - default: 3600 -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create domain - hosts: all - gather_facts: False - tasks: - - name: Domain create request - local_action: - module: rax_dns - credentials: ~/.raxpub - name: example.org - email: admin@example.org - register: rax_dns -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_dns(module, comment, email, name, state, ttl): - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not email: - module.fail_json(msg='An "email" attribute is required for ' - 'creating a domain') - - try: - domain = dns.find(name=name) - except pyrax.exceptions.NoUniqueMatch as e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - domain = dns.create(name=name, emailAddress=email, ttl=ttl, - comment=comment) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(domain, 'comment', None): - update['comment'] = comment - if ttl != getattr(domain, 'ttl', None): - update['ttl'] = ttl - if email != getattr(domain, 'emailAddress', None): - update['emailAddress'] = email - - if update: - try: - domain.update(**update) - changed = True - domain.get() - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=name) - except pyrax.exceptions.NotFound: - domain = {} - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if domain: - try: - domain.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, domain=rax_to_dict(domain)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - email=dict(), - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - email = module.params.get('email') - name = module.params.get('name') - state = module.params.get('state') - ttl = module.params.get('ttl') - - setup_rax_module(module, pyrax, False) - - rax_dns(module, comment, email, name, state, ttl) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py deleted file mode 100644 index 1a6986de..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py +++ /dev/null @@ -1,353 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_dns_record -short_description: Manage DNS records on Rackspace Cloud DNS -description: - - Manage DNS records on Rackspace Cloud DNS -options: - comment: - type: str - description: - - Brief description of the domain. Maximum length of 160 characters - data: - type: str - description: - - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for - SRV/TXT - required: True - domain: - type: str - description: - - Domain name to create the record in. This is an invalid option when - type=PTR - loadbalancer: - type: str - description: - - Load Balancer ID to create a PTR record for. 
Only used with type=PTR - name: - type: str - description: - - FQDN record name to create - required: True - overwrite: - description: - - Add new records if data doesn't match, instead of updating existing - record with matching name. If there are already multiple records with - matching name and overwrite=true, this module will fail. - default: true - type: bool - priority: - type: int - description: - - Required for MX and SRV records, but forbidden for other record types. - If specified, must be an integer from 0 to 65535. - server: - type: str - description: - - Server ID to create a PTR record for. Only used with type=PTR - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - type: int - description: - - Time to live of record in seconds - default: 3600 - type: - type: str - description: - - DNS record type - choices: - - A - - AAAA - - CNAME - - MX - - NS - - SRV - - TXT - - PTR - required: true -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" - - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be - supplied - - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. - - C(PTR) record support was added in version 1.7 -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create DNS Records - hosts: all - gather_facts: False - tasks: - - name: Create A record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - domain: example.org - name: www.example.org - data: "{{ rax_accessipv4 }}" - type: A - register: a_record - - - name: Create PTR record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - server: "{{ rax_id }}" - name: "{{ inventory_hostname }}" - region: DFW - register: ptr_record -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_find_loadbalancer, - rax_find_server, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, - name=None, server=None, state='present', ttl=7200): - changed = False - results = [] - - dns = pyrax.cloud_dns - - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if loadbalancer: - item = rax_find_loadbalancer(module, pyrax, loadbalancer) - elif server: - item = rax_find_server(module, pyrax, server) - - if state == 'present': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - if record.ttl != ttl or record.name != name: - try: - dns.update_ptr_record(item, record, name, data, ttl) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - record.ttl = ttl - record.name = name - results.append(rax_to_dict(record)) - break - else: - results.append(rax_to_dict(record)) - break - - if not results: - record = dict(name=name, type='PTR', data=data, ttl=ttl, - comment=comment) - try: - results = dns.add_ptr_records(item, [record]) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - elif state == 'absent': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - results.append(rax_to_dict(record)) - break - - if results: - try: - dns.delete_ptr_records(item, data) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - -def rax_dns_record(module, comment=None, data=None, domain=None, name=None, - overwrite=True, priority=None, record_type='A', - state='present', ttl=7200): - """Function for manipulating record types other than PTR""" - - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not priority and record_type in ['MX', 'SRV']: - module.fail_json(msg='A "priority" attribute is required for ' - 'creating a MX or SRV record') - - try: - domain = dns.find(name=domain) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - try: - if overwrite: - record = domain.find_record(record_type, name=name) - else: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotUnique as e: - module.fail_json(msg='overwrite=true and there are multiple matching records') - except pyrax.exceptions.DomainRecordNotFound as e: - try: - record_data = { - 'type': record_type, - 'name': name, - 'data': data, - 'ttl': ttl - } - if comment: - record_data.update(dict(comment=comment)) - if priority and record_type.upper() in ['MX', 'SRV']: - record_data.update(dict(priority=priority)) - - record = domain.add_records([record_data])[0] - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(record, 'comment', None): - update['comment'] = comment - if ttl != getattr(record, 'ttl', None): - update['ttl'] = ttl - if priority != getattr(record, 'priority', None): - update['priority'] = priority - if data != getattr(record, 'data', None): - update['data'] = data - - if update: - try: - record.update(**update) - changed = True - record.get() - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=domain) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - try: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotFound as e: - record = {} - except 
pyrax.exceptions.DomainRecordNotUnique as e: - module.fail_json(msg='%s' % e.message) - - if record: - try: - record.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, record=rax_to_dict(record)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - data=dict(required=True), - domain=dict(), - loadbalancer=dict(), - name=dict(required=True), - overwrite=dict(type='bool', default=True), - priority=dict(type='int'), - server=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', - 'SRV', 'TXT', 'PTR']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['server', 'loadbalancer', 'domain'], - ], - required_one_of=[ - ['server', 'loadbalancer', 'domain'], - ], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - data = module.params.get('data') - domain = module.params.get('domain') - loadbalancer = module.params.get('loadbalancer') - name = module.params.get('name') - overwrite = module.params.get('overwrite') - priority = module.params.get('priority') - server = module.params.get('server') - state = module.params.get('state') - ttl = module.params.get('ttl') - record_type = module.params.get('type') - - setup_rax_module(module, pyrax, False) - - if record_type.upper() == 'PTR': - if not server and not loadbalancer: - module.fail_json(msg='one of the following is required: ' - 'server,loadbalancer') - rax_dns_record_ptr(module, data=data, comment=comment, - loadbalancer=loadbalancer, name=name, server=server, - state=state, ttl=ttl) - else: - rax_dns_record(module, comment=comment, data=data, domain=domain, - name=name, overwrite=overwrite, priority=priority, - record_type=record_type, state=state, ttl=ttl) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py deleted file mode 100644 index 0288a5e3..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_facts -short_description: Gather facts for Rackspace Cloud Servers -description: - - Gather facts for Rackspace Cloud Servers. 
-options: - address: - type: str - description: - - Server IP address to retrieve facts for, will match any IP assigned to - the server - id: - type: str - description: - - Server ID to retrieve facts for - name: - type: str - description: - - Server name to retrieve facts for -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Gather info about servers - hosts: all - gather_facts: False - tasks: - - name: Get facts about servers - local_action: - module: rax_facts - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - - name: Map some facts - ansible.builtin.set_fact: - ansible_ssh_host: "{{ rax_accessipv4 }}" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_facts(module, address, name, server_id): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - ansible_facts = {} - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception as e: - pass - - servers[:] = [server for server in servers if server.status != "DELETED"] - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif len(servers) == 1: - ansible_facts = rax_to_dict(servers[0], 'server') - - module.exit_json(changed=changed, ansible_facts=ansible_facts) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - supports_check_mode=True, - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - - setup_rax_module(module, pyrax) - - rax_facts(module, address, name, server_id) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py deleted file mode 100644 index 1e1f82c8..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Paul Durivage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' 
---- -module: rax_files -short_description: Manipulate Rackspace Cloud Files Containers -description: - - Manipulate Rackspace Cloud Files Containers -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing containers. - Selecting this option is only appropriate when setting type=meta - type: bool - default: "no" - container: - type: str - description: - - The container to use for container or metadata operations. - meta: - type: dict - description: - - A hash of items to set as metadata values on a container - private: - description: - - Used to set a container as private, removing it from the CDN. B(Warning!) - Private containers, if previously made public, can have live objects - available until the TTL on cached objects expires - type: bool - default: false - public: - description: - - Used to set a container as public, available via the Cloud Files CDN - type: bool - default: false - region: - type: str - description: - - Region to create an instance in - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent', 'list'] - default: present - ttl: - type: int - description: - - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. - Setting a TTL is only appropriate for containers that are public - type: - type: str - description: - - Type of object to do work on, i.e. metadata object or a container object - choices: - - container - - meta - default: container - web_error: - type: str - description: - - Sets an object to be presented as the HTTP error page when accessed by the CDN URL - web_index: - type: str - description: - - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -author: "Paul Durivage (@angstwad)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Containers" - hosts: local - gather_facts: no - tasks: - - name: "List all containers" - community.general.rax_files: - state: list - - - name: "Create container called 'mycontainer'" - community.general.rax_files: - container: mycontainer - - - name: "Create container 'mycontainer2' with metadata" - community.general.rax_files: - container: mycontainer2 - meta: - key: value - file_for: someuser@example.com - - - name: "Set a container's web index page" - community.general.rax_files: - container: mycontainer - web_index: index.html - - - name: "Set a container's web error page" - community.general.rax_files: - container: mycontainer - web_error: error.html - - - name: "Make container public" - community.general.rax_files: - container: mycontainer - public: yes - - - name: "Make container public with a 24 hour TTL" - community.general.rax_files: - container: mycontainer - public: yes - ttl: 86400 - - - name: "Make container private" - community.general.rax_files: - container: mycontainer - private: yes - -- name: "Test Cloud Files Containers Metadata Storage" - hosts: local - gather_facts: no - tasks: - - name: "Get mycontainer2 metadata" - community.general.rax_files: - container: mycontainer2 - type: meta - - - name: "Set mycontainer2 metadata" - community.general.rax_files: - container: mycontainer2 - type: meta - meta: - uploaded_by: someuser@example.com - - - name: "Remove mycontainer2 metadata" - community.general.rax_files: - container: "mycontainer2" - type: meta - state: absent - meta: - key: "" - file_for: "" -''' - -try: - import pyrax - HAS_PYRAX = 
True -except ImportError as e: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -EXIT_DICT = dict(success=True) -META_PREFIX = 'x-container-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer as e: - module.fail_json(msg=e.message) - - -def _fetch_meta(module, container): - EXIT_DICT['meta'] = dict() - try: - for k, v in container.get_metadata().items(): - split_key = k.split(META_PREFIX)[-1] - EXIT_DICT['meta'][split_key] = v - except Exception as e: - module.fail_json(msg=e.message) - - -def meta(cf, module, container_, state, meta_, clear_meta): - c = _get_container(module, cf, container_) - - if meta_ and state == 'present': - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - elif meta_ and state == 'absent': - remove_results = [] - for k, v in meta_.items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - elif state == 'absent': - remove_results = [] - for k, v in c.get_metadata().items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - - _fetch_meta(module, c) - _locals = locals().keys() - - EXIT_DICT['container'] = c.name - if 'meta_set' in _locals or 'remove_results' in _locals: - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def container(cf, module, container_, state, meta_, clear_meta, ttl, public, - private, web_index, web_error): - if public and private: - module.fail_json(msg='container cannot be simultaneously ' - 'set to public and private') - - if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error): - module.fail_json(msg='state cannot be omitted when setting/removing ' - 'attributes on a container') - - if state == 'list': - # We don't care if attributes are specified, let's list containers - EXIT_DICT['containers'] = cf.list_containers() - module.exit_json(**EXIT_DICT) - - try: - c = cf.get_container(container_) - except pyrax.exc.NoSuchContainer as e: - # Make the container if state=present, otherwise bomb out - if state == 'present': - try: - c = cf.create_container(container_) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['changed'] = True - EXIT_DICT['created'] = True - else: - module.fail_json(msg=e.message) - else: - # Successfully grabbed a container object - # Delete if state is absent - if state == 'absent': - try: - cont_deleted = c.delete() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['deleted'] = True - - if meta_: - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - finally: - _fetch_meta(module, c) - - if ttl: - try: - c.cdn_ttl = ttl - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['ttl'] = c.cdn_ttl - - if public: - try: - cont_public = c.make_public() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, - ssl_url=c.cdn_ssl_uri, - streaming_url=c.cdn_streaming_uri, - ios_uri=c.cdn_ios_uri) - - if private: - try: - cont_private = c.make_private() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_private'] = True 
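The metadata plumbing above hinges on Cloud Files returning container metadata keys prefixed with x-container-meta-; _fetch_meta strips that prefix before reporting results. A minimal standalone sketch of the same stripping, with strip_meta_prefix as a hypothetical name not used by the module:

META_PREFIX = 'x-container-meta-'

def strip_meta_prefix(raw_meta):
    # Keep only the user-facing tail of each key, mirroring _fetch_meta's
    # k.split(META_PREFIX)[-1] above.
    return {k.split(META_PREFIX)[-1]: v for k, v in raw_meta.items()}

print(strip_meta_prefix({'x-container-meta-file-for': 'someuser@example.com'}))
# -> {'file-for': 'someuser@example.com'}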
- - if web_index: - try: - cont_web_index = c.set_web_index_page(web_index) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_index'] = True - finally: - _fetch_meta(module, c) - - if web_error: - try: - cont_err_index = c.set_web_error_page(web_error) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_error'] = True - finally: - _fetch_meta(module, c) - - EXIT_DICT['container'] = c.name - EXIT_DICT['objs_in_container'] = c.object_count - EXIT_DICT['total_bytes'] = c.total_bytes - - _locals = locals().keys() - if ('cont_deleted' in _locals - or 'meta_set' in _locals - or 'cont_public' in _locals - or 'cont_private' in _locals - or 'cont_web_index' in _locals - or 'cont_err_index' in _locals): - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "container": - container(cf, module, container_, state, meta_, clear_meta, ttl, - public, private, web_index, web_error) - else: - meta(cf, module, container_, state, meta_, clear_meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(), - state=dict(choices=['present', 'absent', 'list'], - default='present'), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - type=dict(choices=['container', 'meta'], default='container'), - ttl=dict(type='int'), - public=dict(default=False, type='bool'), - private=dict(default=False, type='bool'), - web_index=dict(), - web_error=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container_ = module.params.get('container') - state = module.params.get('state') - meta_ = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - typ = module.params.get('type') - ttl = module.params.get('ttl') - public = module.params.get('public') - private = module.params.get('private') - web_index = module.params.get('web_index') - web_error = module.params.get('web_error') - - if state in ['present', 'absent'] and not container_: - module.fail_json(msg='please specify a container name') - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting ' - 'metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py deleted file mode 100644 index 3269fe05..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py +++ /dev/null @@ -1,609 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Paul Durivage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = 
type - - -DOCUMENTATION = ''' ---- -module: rax_files_objects -short_description: Upload, download, and delete objects in Rackspace Cloud Files -description: - - Upload, download, and delete objects in Rackspace Cloud Files -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing objects. - Selecting this option is only appropriate when setting type=meta - type: bool - default: 'no' - container: - type: str - description: - - The container to use for file object operations. - required: true - dest: - type: str - description: - - The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder". - Used to specify the destination of an operation on a remote object; i.e. a file name, - "file1", or a comma-separated list of remote objects, "file1,file2,file17" - expires: - type: int - description: - - Used to set an expiration on a file or folder uploaded to Cloud Files. - Requires an integer, specifying expiration in seconds - meta: - type: dict - description: - - A hash of items to set as metadata values on an uploaded file or folder - method: - type: str - description: - - The method of operation to be performed. For example, put to upload files - to Cloud Files, get to download files from Cloud Files or delete to delete - remote objects in Cloud Files - choices: - - get - - put - - delete - default: get - src: - type: str - description: - - Source from which to upload files. Used to specify a remote object as a source for - an operation, i.e. a file name, "file1", or a comma-separated list of remote objects, - "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations - structure: - description: - - Used to specify whether to maintain nested directory structure when downloading objects - from Cloud Files. 
Setting to false downloads the contents of a container to a single, - flat directory - type: bool - default: 'yes' - type: - type: str - description: - - Type of object to do work on - - Metadata object or a file object - choices: - - file - - meta - default: file -author: "Paul Durivage (@angstwad)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Objects" - hosts: local - gather_facts: False - tasks: - - name: "Get objects from test container" - community.general.rax_files_objects: - container: testcont - dest: ~/Downloads/testcont - - - name: "Get single object from test container" - community.general.rax_files_objects: - container: testcont - src: file1 - dest: ~/Downloads/testcont - - - name: "Get several objects from test container" - community.general.rax_files_objects: - container: testcont - src: file1,file2,file3 - dest: ~/Downloads/testcont - - - name: "Delete one object in test container" - community.general.rax_files_objects: - container: testcont - method: delete - dest: file1 - - - name: "Delete several objects in test container" - community.general.rax_files_objects: - container: testcont - method: delete - dest: file2,file3,file4 - - - name: "Delete all objects in test container" - community.general.rax_files_objects: - container: testcont - method: delete - - - name: "Upload all files to test container" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/onehundred - - - name: "Upload one file to test container" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/testcont/file1 - - - name: "Upload one file to test container with metadata" - community.general.rax_files_objects: - container: testcont - src: ~/Downloads/testcont/file2 - method: put - meta: - testkey: testdata - who_uploaded_this: someuser@example.com - - - name: "Upload one file to test container with TTL of 60 seconds" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/testcont/file3 - expires: 60 - - - name: "Attempt to get remote object that does not exist" - community.general.rax_files_objects: - container: testcont - method: get - src: FileThatDoesNotExist.jpg - dest: ~/Downloads/testcont - ignore_errors: yes - - - name: "Attempt to delete remote object that does not exist" - community.general.rax_files_objects: - container: testcont - method: delete - dest: FileThatDoesNotExist.jpg - ignore_errors: yes - -- name: "Test Cloud Files Objects Metadata" - hosts: local - gather_facts: false - tasks: - - name: "Get metadata on one object" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file2 - - - name: "Get metadata on several objects" - community.general.rax_files_objects: - container: testcont - type: meta - src: file2,file1 - - - name: "Set metadata on an object" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file17 - method: put - meta: - key1: value1 - key2: value2 - clear_meta: true - - - name: "Verify metadata is set" - community.general.rax_files_objects: - container: testcont - type: meta - src: file17 - - - name: "Delete metadata" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file17 - method: delete - meta: - key1: '' - key2: '' - - - name: "Get metadata on all objects" - community.general.rax_files_objects: - container: testcont - type: meta -''' - -import os - 
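The file-object functions below accept src and dest either as a single object name or as a comma-separated list, exactly as the examples above show. A minimal sketch of that parsing; parse_object_list is a hypothetical helper, not a name the module uses:

def parse_object_list(spec):
    # "file1, file2,file17" -> ['file1', 'file2', 'file17']
    return [name.strip() for name in spec.split(',')]

print(parse_object_list('file1, file2,file17'))  # ['file1', 'file2', 'file17']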
-try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -EXIT_DICT = dict(success=False) -META_PREFIX = 'x-object-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer as e: - module.fail_json(msg=e.message) - - -def _upload_folder(cf, folder, container, ttl=None, headers=None): - """ Uploads a folder to Cloud Files. - """ - total_bytes = 0 - for root, dirs, files in os.walk(folder): - for fname in files: - full_path = os.path.join(root, fname) - obj_name = os.path.relpath(full_path, folder) - obj_size = os.path.getsize(full_path) - cf.upload_file(container, full_path, - obj_name=obj_name, return_none=True, ttl=ttl, headers=headers) - total_bytes += obj_size - return total_bytes - - -def upload(module, cf, container, src, dest, meta, expires): - """ Uploads a single object or a folder to Cloud Files. Optionally sets - metadata, a TTL value (expires), or Content-Disposition and Content-Encoding - headers. - """ - if not src: - module.fail_json(msg='src must be specified when uploading') - - c = _get_container(module, cf, container) - src = os.path.abspath(os.path.expanduser(src)) - is_dir = os.path.isdir(src) - - if not is_dir and not os.path.isfile(src) or not os.path.exists(src): - module.fail_json(msg='src must be a file or a directory') - if dest and is_dir: - module.fail_json(msg='dest cannot be set when whole ' - 'directories are uploaded') - - cont_obj = None - total_bytes = 0 - if dest and not is_dir: - try: - cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta) - except Exception as e: - module.fail_json(msg=e.message) - elif is_dir: - try: - total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta) - except Exception as e: - module.fail_json(msg=e.message) - else: - try: - cont_obj = c.upload_file(src, ttl=expires, headers=meta) - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['success'] = True - EXIT_DICT['container'] = c.name - EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name) - if cont_obj or total_bytes > 0: - EXIT_DICT['changed'] = True - if meta: - EXIT_DICT['meta'] = dict(updated=True) - - if cont_obj: - EXIT_DICT['bytes'] = cont_obj.total_bytes - EXIT_DICT['etag'] = cont_obj.etag - else: - EXIT_DICT['bytes'] = total_bytes - - module.exit_json(**EXIT_DICT) - - -def download(module, cf, container, src, dest, structure): - """ Download objects from Cloud Files to a local path specified by "dest". - Optionally disable maintaining a directory structure by passing a - false value to "structure".
- """ - # Looking for an explicit destination - if not dest: - module.fail_json(msg='dest is a required argument when ' - 'downloading from Cloud Files') - - # Attempt to fetch the container by name - c = _get_container(module, cf, container) - - # Accept a single object name or a comma-separated list of objs - # If not specified, get the entire container - if src: - objs = src.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - dest = os.path.abspath(os.path.expanduser(dest)) - is_dir = os.path.isdir(dest) - - if not is_dir: - module.fail_json(msg='dest must be a directory') - - results = [] - for obj in objs: - try: - c.download_object(obj, dest, structure=structure) - except Exception as e: - module.fail_json(msg=e.message) - else: - results.append(obj) - - len_results = len(results) - len_objs = len(objs) - - EXIT_DICT['container'] = c.name - EXIT_DICT['requested_downloaded'] = results - if results: - EXIT_DICT['changed'] = True - if len_results == len_objs: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest) - else: - EXIT_DICT['msg'] = "Error: only %s of %s objects were " \ - "downloaded" % (len_results, len_objs) - module.exit_json(**EXIT_DICT) - - -def delete(module, cf, container, src, dest): - """ Delete specific objects by proving a single file name or a - comma-separated list to src OR dest (but not both). Omitting file name(s) - assumes the entire container is to be deleted. - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - c = _get_container(module, cf, container) - - if objs: - objs = objs.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - num_objs = len(objs) - - results = [] - for obj in objs: - try: - result = c.delete_object(obj) - except Exception as e: - module.fail_json(msg=e.message) - else: - results.append(result) - - num_deleted = results.count(True) - - EXIT_DICT['container'] = c.name - EXIT_DICT['deleted'] = num_deleted - EXIT_DICT['requested_deleted'] = objs - - if num_deleted: - EXIT_DICT['changed'] = True - - if num_objs == num_deleted: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects deleted" % num_deleted - else: - EXIT_DICT['msg'] = ("Error: only %s of %s objects " - "deleted" % (num_deleted, num_objs)) - module.exit_json(**EXIT_DICT) - - -def get_meta(module, cf, container, src, dest): - """ Get metadata for a single file, comma-separated list, or entire - container - """ - c = _get_container(module, cf, container) - - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - if objs: - objs = objs.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - results = dict() - for obj in objs: - try: - meta = c.get_object(obj).get_metadata() - except Exception as e: - module.fail_json(msg=e.message) - else: - results[obj] = dict() - for k, v in meta.items(): - meta_key = k.split(META_PREFIX)[-1] - results[obj][meta_key] = v - - EXIT_DICT['container'] = c.name - if results: - EXIT_DICT['meta_results'] = results - EXIT_DICT['success'] = True - module.exit_json(**EXIT_DICT) - - -def put_meta(module, cf, container, src, dest, meta, clear_meta): - """ Set metadata on a container, single file, or 
comma-separated list. - Passing a true value to clear_meta clears the metadata stored in Cloud - Files before setting the new metadata to the value of "meta". - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to set meta" - " have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - objs = objs.split(',') - objs = [obj.strip() for obj in objs] - - c = _get_container(module, cf, container) - - results = [] - for obj in objs: - try: - result = c.get_object(obj).set_metadata(meta, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - else: - results.append(result) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_changed'] = len(results) - module.exit_json(**EXIT_DICT) - - -def delete_meta(module, cf, container, src, dest, meta): - """ Removes metadata keys and values specified in meta, if any. Deletes them on - all objects specified by src or dest (but not both), if any; otherwise it - deletes keys on all objects in the container. - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; meta keys to be " - "deleted have been specified on both src and dest" - " args") - elif dest: - objs = dest - else: - objs = src - - objs = objs.split(',') - objs = [obj.strip() for obj in objs] - - c = _get_container(module, cf, container) - - results = [] # Num of metadata keys removed, not objects affected - for obj in objs: - if meta: - for k, v in meta.items(): - try: - result = c.get_object(obj).remove_metadata_key(k) - except Exception as e: - module.fail_json(msg=e.message) - else: - results.append(result) - else: - try: - o = c.get_object(obj) - except pyrax.exc.NoSuchObject as e: - module.fail_json(msg=e.message) - - for k, v in o.get_metadata().items(): - try: - result = o.remove_metadata_key(k) - except Exception as e: - module.fail_json(msg=e.message) - results.append(result) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_deleted'] = len(results) - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, - structure, expires): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client.
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "file": - if method == 'put': - upload(module, cf, container, src, dest, meta, expires) - - elif method == 'get': - download(module, cf, container, src, dest, structure) - - elif method == 'delete': - delete(module, cf, container, src, dest) - - else: - if method == 'get': - get_meta(module, cf, container, src, dest) - - if method == 'put': - put_meta(module, cf, container, src, dest, meta, clear_meta) - - if method == 'delete': - delete_meta(module, cf, container, src, dest, meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(required=True), - src=dict(), - dest=dict(), - method=dict(default='get', choices=['put', 'get', 'delete']), - type=dict(default='file', choices=['file', 'meta']), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - structure=dict(default=True, type='bool'), - expires=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container = module.params.get('container') - src = module.params.get('src') - dest = module.params.get('dest') - method = module.params.get('method') - typ = module.params.get('type') - meta = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - structure = module.params.get('structure') - expires = module.params.get('expires') - - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py deleted file mode 100644 index 2021052f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_identity -short_description: Load Rackspace Cloud Identity -description: - - Verifies Rackspace Cloud credentials and returns identity information -options: - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present'] - default: present - required: false -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Load Rackspace Cloud Identity - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Identity - local_action: - module: rax_identity - credentials: ~/.raxpub - region: DFW - register: rackspace_identity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict, - setup_rax_module) - - -def cloud_identity(module, state, identity): - instance = dict( - authenticated=identity.authenticated, - credentials=identity._creds_file - ) - changed = False - - instance.update(rax_to_dict(identity)) - instance['services'] = instance.get('services', {}).keys() - - if state == 'present': - if not identity.authenticated: - module.fail_json(msg='Credentials could not be verified!') - - module.exit_json(changed=changed, identity=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - if not pyrax.identity: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloud_identity(module, state, pyrax.identity) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py deleted file mode 100644 index 90b0183e..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_keypair -short_description: Create a keypair for use with Rackspace Cloud Servers -description: - - Create a keypair for use with Rackspace Cloud Servers -options: - name: - type: str - description: - - Name of keypair - required: true - public_key: - type: str - description: - - Public Key string to upload. Can be a file path or string - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: "Matt Martz (@sivel)" -notes: - - Keypairs cannot be manipulated, only created and deleted. To "update" a - keypair you must first delete and then recreate. 
- - The ability to specify a file path for the public key was added in 1.7 -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: Keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - region: DFW - register: keypair - - name: Create local public key - local_action: - module: copy - content: "{{ keypair.keypair.public_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub" - - name: Create local private key - local_action: - module: copy - content: "{{ keypair.keypair.private_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" - -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: Keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" - region: DFW - register: keypair -''' -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_keypair(module, name, public_key, state): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - keypair = {} - - if state == 'present': - if public_key and os.path.isfile(public_key): - try: - f = open(public_key) - public_key = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % public_key) - - try: - keypair = cs.keypairs.find(name=name) - except cs.exceptions.NotFound: - try: - keypair = cs.keypairs.create(name, public_key) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - keypair = cs.keypairs.find(name=name) - except Exception: - pass - - if keypair: - try: - keypair.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(required=True), - public_key=dict(), - state=dict(default='present', choices=['absent', 'present']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - public_key = module.params.get('public_key') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - rax_keypair(module, name, public_key, state) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py deleted file mode 100644 index 3504181f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_meta -short_description: Manipulate metadata for Rackspace Cloud Servers -description: - - Manipulate metadata for Rackspace Cloud Servers -options: - address: - type: str - description: - - Server IP address to modify metadata for, will match any IP assigned to - the server - id: - type: str - description: - - Server ID to modify metadata for - name: - type: str - description: - - Server name to modify metadata for - meta: - type: dict - description: - - A hash of metadata to associate with the instance -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Set metadata for a server - hosts: all - gather_facts: False - tasks: - - name: Set metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - meta: - group: primary_group - groups: - - group_two - - group_three - app: my_app - - - name: Clear metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW -''' - -import json - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module -from ansible.module_utils.six import string_types - - -def rax_meta(module, address, name, server_id, meta): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception as e: - pass - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif not servers: - module.fail_json(msg='Failed to find a server matching provided ' - 'search parameters') - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - server = servers[0] - if server.metadata == meta: - changed = False - else: - changed = True - removed = set(server.metadata.keys()).difference(meta.keys()) - cs.servers.delete_meta(server, list(removed)) - cs.servers.set_meta(server, meta) - server.get() - - module.exit_json(changed=changed, meta=server.metadata) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - meta=dict(type='dict', default=dict()), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - 
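rax_meta above normalizes every metadata value to a string before comparing against the server's current metadata: lists are comma-joined, dicts JSON-encoded, and anything else coerced with '%s'. A standalone sketch of that normalization, assuming Python 3 str where the module uses six's string_types; normalize_meta is a hypothetical name:

import json

def normalize_meta(meta):
    out = {}
    for k, v in meta.items():
        if isinstance(v, list):
            out[k] = ','.join('%s' % i for i in v)  # ['a', 'b'] -> 'a,b'
        elif isinstance(v, dict):
            out[k] = json.dumps(v)                  # nested data -> JSON text
        elif not isinstance(v, str):
            out[k] = '%s' % v                       # 3 -> '3'
        else:
            out[k] = v
    return out

print(normalize_meta({'groups': ['a', 'b'], 'app': {'tier': 1}, 'count': 3}))
# -> {'groups': 'a,b', 'app': '{"tier": 1}', 'count': '3'}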
mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - meta = module.params.get('meta') - - setup_rax_module(module, pyrax) - - rax_meta(module, address, name, server_id, meta) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py deleted file mode 100644 index 7e99db3f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py +++ /dev/null @@ -1,228 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_alarm -short_description: Create or delete a Rackspace Cloud Monitoring alarm. -description: -- Create or delete a Rackspace Cloud Monitoring alarm that associates an - existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with - criteria that specify what conditions will trigger which levels of - notifications. Rackspace monitoring module flow | rax_mon_entity -> - rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> - *rax_mon_alarm* -options: - state: - type: str - description: - - Ensure that the alarm with this C(label) exists or does not exist. - choices: [ "present", "absent" ] - required: false - default: present - label: - type: str - description: - - Friendly name for this alarm, used to achieve idempotence. Must be a String - between 1 and 255 characters long. - required: true - entity_id: - type: str - description: - - ID of the entity this alarm is attached to. May be acquired by registering - the value of a rax_mon_entity task. - required: true - check_id: - type: str - description: - - ID of the check that should be alerted on. May be acquired by registering - the value of a rax_mon_check task. - required: true - notification_plan_id: - type: str - description: - - ID of the notification plan to trigger if this alarm fires. May be acquired - by registering the value of a rax_mon_notification_plan task. - required: true - criteria: - type: str - description: - - Alarm DSL that describes alerting conditions and their output states. Must - be between 1 and 16384 characters long. See - http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html - for a reference on the alerting language. - disabled: - description: - - If yes, create this alarm, but leave it in an inactive state. Defaults to - no. - type: bool - default: false - metadata: - type: dict - description: - - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String - keys and values between 1 and 255 characters long. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Alarm example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Ensure that a specific alarm exists. 
- community.general.rax_mon_alarm: - credentials: ~/.rax_pub - state: present - label: uhoh - entity_id: "{{ the_entity['entity']['id'] }}" - check_id: "{{ the_check['check']['id'] }}" - notification_plan_id: "{{ defcon1['notification_plan']['id'] }}" - criteria: > - if (rate(metric['average']) > 10) { - return new AlarmStatus(WARNING); - } - return new AlarmStatus(OK); - register: the_alarm -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, - disabled, metadata): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - if criteria and len(criteria) < 1 or len(criteria) > 16384: - module.fail_json(msg='criteria must be between 1 and 16384 characters long') - - # Coerce attributes. - - changed = False - alarm = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [a for a in cm.list_alarms(entity_id) if a.label == label] - - if existing: - alarm = existing[0] - - if state == 'present': - should_create = False - should_update = False - should_delete = False - - if len(existing) > 1: - module.fail_json(msg='%s existing alarms have the label %s.' % - (len(existing), label)) - - if alarm: - if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id: - should_delete = should_create = True - - should_update = (disabled and disabled != alarm.disabled) or \ - (metadata and metadata != alarm.metadata) or \ - (criteria and criteria != alarm.criteria) - - if should_update and not should_delete: - cm.update_alarm(entity=entity_id, alarm=alarm, - criteria=criteria, disabled=disabled, - label=label, metadata=metadata) - changed = True - - if should_delete: - alarm.delete() - changed = True - else: - should_create = True - - if should_create: - alarm = cm.create_alarm(entity=entity_id, check=check_id, - notification_plan=notification_plan_id, - criteria=criteria, disabled=disabled, label=label, - metadata=metadata) - changed = True - else: - for a in existing: - a.delete() - changed = True - - if alarm: - alarm_dict = { - "id": alarm.id, - "label": alarm.label, - "check_id": alarm.check_id, - "notification_plan_id": alarm.notification_plan_id, - "criteria": alarm.criteria, - "disabled": alarm.disabled, - "metadata": alarm.metadata - } - module.exit_json(changed=changed, alarm=alarm_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - entity_id=dict(required=True), - check_id=dict(required=True), - notification_plan_id=dict(required=True), - criteria=dict(), - disabled=dict(type='bool', default=False), - metadata=dict(type='dict') - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - entity_id = module.params.get('entity_id') - check_id = 
module.params.get('check_id') - notification_plan_id = module.params.get('notification_plan_id') - criteria = module.params.get('criteria') - disabled = module.boolean(module.params.get('disabled')) - metadata = module.params.get('metadata') - - setup_rax_module(module, pyrax) - - alarm(module, state, label, entity_id, check_id, notification_plan_id, - criteria, disabled, metadata) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py deleted file mode 100644 index 17a3932f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py +++ /dev/null @@ -1,320 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_check -short_description: Create or delete a Rackspace Cloud Monitoring check for an - existing entity. -description: -- Create or delete a Rackspace Cloud Monitoring check associated with an - existing rax_mon_entity. A check is a specific test or measurement that is - performed, possibly from different monitoring zones, on the systems you - monitor. Rackspace monitoring module flow | rax_mon_entity -> - *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> - rax_mon_alarm -options: - state: - type: str - description: - - Ensure that a check with this C(label) exists or does not exist. - choices: ["present", "absent"] - default: present - entity_id: - type: str - description: - - ID of the rax_mon_entity to target with this check. - required: true - label: - type: str - description: - - Defines a label for this check, between 1 and 64 characters long. - required: true - check_type: - type: str - description: - - The type of check to create. C(remote.) checks may be created on any - rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities - that have a non-null C(agent_id). - - | - Choices for this option are: - - C(remote.dns) - - C(remote.ftp-banner) - - C(remote.http) - - C(remote.imap-banner) - - C(remote.mssql-banner) - - C(remote.mysql-banner) - - C(remote.ping) - - C(remote.pop3-banner) - - C(remote.postgresql-banner) - - C(remote.smtp-banner) - - C(remote.smtp) - - C(remote.ssh) - - C(remote.tcp) - - C(remote.telnet-banner) - - C(agent.filesystem) - - C(agent.memory) - - C(agent.load_average) - - C(agent.cpu) - - C(agent.disk) - - C(agent.network) - - C(agent.plugin) - required: true - monitoring_zones_poll: - type: str - description: - - Comma-separated list of the names of the monitoring zones the check should - run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, - mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. - target_hostname: - type: str - description: - - One of `target_hostname` and `target_alias` is required for remote.* checks, - but prohibited for agent.* checks. The hostname this check should target. - Must be a valid IPv4, IPv6, or FQDN. - target_alias: - type: str - description: - - One of `target_alias` and `target_hostname` is required for remote.* checks, - but prohibited for agent.* checks. Use the corresponding key in the entity's - `ip_addresses` hash to resolve an IP address to target. 
- details: - type: dict - description: - - Additional details specific to the check type. Must be a hash of strings - between 1 and 255 characters long, or an array or object containing 0 to - 256 items. - disabled: - description: - - If "yes", ensure the check is created, but don't actually use it yet. - type: bool - default: false - metadata: - type: dict - description: - - Hash of arbitrary key-value pairs to accompany this check if it fires. - Keys and values must be strings between 1 and 255 characters long. - period: - type: int - description: - - The number of seconds between each time the check is performed. Must be - greater than the minimum period set on your account. - timeout: - type: int - description: - - The number of seconds this check will wait when attempting to collect - results. Must be less than the period. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create a monitoring check - gather_facts: False - hosts: local - connection: local - tasks: - - name: Associate a check with an existing entity. - community.general.rax_mon_check: - credentials: ~/.rax_pub - state: present - entity_id: "{{ the_entity['entity']['id'] }}" - label: the_check - check_type: remote.ping - monitoring_zones_poll: mziad,mzord,mzdfw - details: - count: 10 - meta: - hurf: durf - register: the_check -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_check(module, state, entity_id, label, check_type, - monitoring_zones_poll, target_hostname, target_alias, details, - disabled, metadata, period, timeout): - - # Coerce attributes. - - if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): - monitoring_zones_poll = [monitoring_zones_poll] - - if period: - period = int(period) - - if timeout: - timeout = int(timeout) - - changed = False - check = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - entity = cm.get_entity(entity_id) - if not entity: - module.fail_json(msg='Failed to instantiate entity. "%s" may not be' - ' a valid entity id.' % entity_id) - - existing = [e for e in entity.list_checks() if e.label == label] - - if existing: - check = existing[0] - - if state == 'present': - if len(existing) > 1: - module.fail_json(msg='%s existing checks have a label of %s.' % - (len(existing), label)) - - should_delete = False - should_create = False - should_update = False - - if check: - # Details may include keys set to default values that are not - # included in the initial creation. - # - # Only force a recreation of the check if one of the *specified* - # keys is missing or has a different value. 
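The comment above describes the recreation test for check details: only keys the user actually specified are compared, so server-side default keys never force a delete/create cycle. An equivalent standalone predicate, with needs_recreate as a hypothetical name:

def needs_recreate(specified, existing):
    # True when any user-specified key is missing from, or differs in, the
    # existing check's details; extra server-side keys are ignored.
    return any(key not in existing or existing[key] != value
               for key, value in specified.items())

print(needs_recreate({'count': 10}, {'count': 10, 'period': 60}))  # False
print(needs_recreate({'count': 5}, {'count': 10}))                 # True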
- if details: - for (key, value) in details.items(): - if key not in check.details: - should_delete = should_create = True - elif value != check.details[key]: - should_delete = should_create = True - - should_update = label != check.label or \ - (target_hostname and target_hostname != check.target_hostname) or \ - (target_alias and target_alias != check.target_alias) or \ - (disabled != check.disabled) or \ - (metadata and metadata != check.metadata) or \ - (period and period != check.period) or \ - (timeout and timeout != check.timeout) or \ - (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) - - if should_update and not should_delete: - check.update(label=label, - disabled=disabled, - metadata=metadata, - monitoring_zones_poll=monitoring_zones_poll, - timeout=timeout, - period=period, - target_alias=target_alias, - target_hostname=target_hostname) - changed = True - else: - # The check doesn't exist yet. - should_create = True - - if should_delete: - check.delete() - - if should_create: - check = cm.create_check(entity, - label=label, - check_type=check_type, - target_hostname=target_hostname, - target_alias=target_alias, - monitoring_zones_poll=monitoring_zones_poll, - details=details, - disabled=disabled, - metadata=metadata, - period=period, - timeout=timeout) - changed = True - elif state == 'absent': - if check: - check.delete() - changed = True - else: - module.fail_json(msg='state must be either present or absent.') - - if check: - check_dict = { - "id": check.id, - "label": check.label, - "type": check.type, - "target_hostname": check.target_hostname, - "target_alias": check.target_alias, - "monitoring_zones_poll": check.monitoring_zones_poll, - "details": check.details, - "disabled": check.disabled, - "metadata": check.metadata, - "period": check.period, - "timeout": check.timeout - } - module.exit_json(changed=changed, check=check_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - entity_id=dict(required=True), - label=dict(required=True), - check_type=dict(required=True), - monitoring_zones_poll=dict(), - target_hostname=dict(), - target_alias=dict(), - details=dict(type='dict', default={}), - disabled=dict(type='bool', default=False), - metadata=dict(type='dict', default={}), - period=dict(type='int'), - timeout=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - entity_id = module.params.get('entity_id') - label = module.params.get('label') - check_type = module.params.get('check_type') - monitoring_zones_poll = module.params.get('monitoring_zones_poll') - target_hostname = module.params.get('target_hostname') - target_alias = module.params.get('target_alias') - details = module.params.get('details') - disabled = module.boolean(module.params.get('disabled')) - metadata = module.params.get('metadata') - period = module.params.get('period') - timeout = module.params.get('timeout') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_check(module, state, entity_id, label, check_type, - monitoring_zones_poll, target_hostname, target_alias, details, - disabled, metadata, period, timeout) - - -if __name__ == '__main__': - main() diff --git 
a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py deleted file mode 100644 index 2f8cdeef..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_entity -short_description: Create or delete a Rackspace Cloud Monitoring entity -description: -- Create or delete a Rackspace Cloud Monitoring entity, which represents a device - to monitor. Entities associate checks and alarms with a target system and - provide a convenient, centralized place to store IP addresses. Rackspace - monitoring module flow | *rax_mon_entity* -> rax_mon_check -> - rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -options: - label: - type: str - description: - - Defines a name for this entity. Must be a non-empty string between 1 and - 255 characters long. - required: true - state: - type: str - description: - - Ensure that an entity with this C(name) exists or does not exist. - choices: ["present", "absent"] - default: present - agent_id: - type: str - description: - - Rackspace monitoring agent on the target device to which this entity is - bound. Necessary to collect C(agent.) rax_mon_checks against this entity. - named_ip_addresses: - type: dict - description: - - Hash of IP addresses that may be referenced by name by rax_mon_checks - added to this entity. Must be a dictionary of with keys that are names - between 1 and 64 characters long, and values that are valid IPv4 or IPv6 - addresses. - metadata: - type: dict - description: - - Hash of arbitrary C(name), C(value) pairs that are passed to associated - rax_mon_alarms. Names and values must all be between 1 and 255 characters - long. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Entity example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Ensure an entity exists - community.general.rax_mon_entity: - credentials: ~/.rax_pub - state: present - label: my_entity - named_ip_addresses: - web_box: 192.0.2.4 - db_box: 192.0.2.5 - meta: - hurf: durf - register: the_entity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, - metadata): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. 
This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for entity in cm.list_entities(): - if label == entity.label: - existing.append(entity) - - entity = None - - if existing: - entity = existing[0] - - if state == 'present': - should_update = False - should_delete = False - should_create = False - - if len(existing) > 1: - module.fail_json(msg='%s existing entities have the label %s.' % - (len(existing), label)) - - if entity: - if named_ip_addresses and named_ip_addresses != entity.ip_addresses: - should_delete = should_create = True - - # Change an existing Entity, unless there's nothing to do. - should_update = agent_id and agent_id != entity.agent_id or \ - (metadata and metadata != entity.metadata) - - if should_update and not should_delete: - entity.update(agent_id, metadata) - changed = True - - if should_delete: - entity.delete() - else: - should_create = True - - if should_create: - # Create a new Entity. - entity = cm.create_entity(label=label, agent=agent_id, - ip_addresses=named_ip_addresses, - metadata=metadata) - changed = True - else: - # Delete the existing Entities. - for e in existing: - e.delete() - changed = True - - if entity: - entity_dict = { - "id": entity.id, - "name": entity.name, - "agent_id": entity.agent_id, - } - module.exit_json(changed=changed, entity=entity_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - agent_id=dict(), - named_ip_addresses=dict(type='dict', default={}), - metadata=dict(type='dict', default={}) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - agent_id = module.params.get('agent_id') - named_ip_addresses = module.params.get('named_ip_addresses') - metadata = module.params.get('metadata') - - setup_rax_module(module, pyrax) - - cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py deleted file mode 100644 index fb645c30..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_notification -short_description: Create or delete a Rackspace Cloud Monitoring notification. -description: -- Create or delete a Rackspace Cloud Monitoring notification that specifies a - channel that can be used to communicate alarms, such as email, webhooks, or - PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> - *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm -options: - state: - type: str - description: - - Ensure that the notification with this C(label) exists or does not exist. 
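rax_mon_entity resolves its target by scanning list_entities() for a matching label and refuses to proceed when the label is ambiguous. A runnable sketch of that lookup; it assumes only objects with a label attribute and is not the pyrax API.

from collections import namedtuple

Entity = namedtuple('Entity', 'label agent_id')

def find_unique_by_label(items, label):
    """Return the single item whose label matches, or None if absent;
    raise on duplicates, mirroring the fail_json() branch above."""
    matches = [i for i in items if i.label == label]
    if len(matches) > 1:
        raise ValueError('%d existing entities have the label %s'
                         % (len(matches), label))
    return matches[0] if matches else None

entities = [Entity('web', None), Entity('db', 'a1')]
assert find_unique_by_label(entities, 'db').agent_id == 'a1'
assert find_unique_by_label(entities, 'missing') is None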
- choices: ['present', 'absent'] - default: present - label: - type: str - description: - - Defines a friendly name for this notification. String between 1 and 255 - characters long. - required: true - notification_type: - type: str - description: - - A supported notification type. - choices: ["webhook", "email", "pagerduty"] - required: true - details: - type: dict - description: - - Dictionary of key-value pairs used to initialize the notification. - Required keys and meanings vary with notification type. See - http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ - service-notification-types-crud.html for details. - required: true -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Monitoring notification example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Email me when something goes wrong. - community.general.rax_mon_notification: - credentials: ~/.rax_pub - label: omg - notification_type: email - details: - address: me@mailhost.com - register: the_notification -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def notification(module, state, label, notification_type, details): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - notification = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for n in cm.list_notifications(): - if n.label == label: - existing.append(n) - - if existing: - notification = existing[0] - - if state == 'present': - should_update = False - should_delete = False - should_create = False - - if len(existing) > 1: - module.fail_json(msg='%s existing notifications are labelled %s.' % - (len(existing), label)) - - if notification: - should_delete = (notification_type != notification.type) - - should_update = (details != notification.details) - - if should_update and not should_delete: - notification.update(details=details) - changed = True - - if should_delete: - notification.delete() - should_create = True - else: - should_create = True - - if should_create: - notification = cm.create_notification(notification_type, - label=label, details=details) - changed = True - else: - for n in existing: - n.delete() - changed = True - - if notification: - notification_dict = { - "id": notification.id, - "type": notification.type, - "label": notification.label, - "details": notification.details - } - module.exit_json(changed=changed, notification=notification_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']), - details=dict(required=True, type='dict') - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - notification_type = module.params.get('notification_type') - details = module.params.get('details') - - setup_rax_module(module, pyrax) - - notification(module, state, label, notification_type, details) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py deleted file mode 100644 index 25e50682..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_notification_plan -short_description: Create or delete a Rackspace Cloud Monitoring notification - plan. -description: -- Create or delete a Rackspace Cloud Monitoring notification plan by - associating existing rax_mon_notifications with severity levels. Rackspace - monitoring module flow | rax_mon_entity -> rax_mon_check -> - rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm -options: - state: - type: str - description: - - Ensure that the notification plan with this C(label) exists or does not - exist. - choices: ['present', 'absent'] - default: present - label: - type: str - description: - - Defines a friendly name for this notification plan. String between 1 and - 255 characters long. - required: true - critical_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is CRITICAL. Must be an - array of valid rax_mon_notification ids. - warning_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is WARNING. Must be an array - of valid rax_mon_notification ids. 
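The notification logic just shown distinguishes changes that can be patched (details) from ones that force a replacement (notification_type). A small, dependency-free sketch of that decision; the state names are hypothetical.

def plan_notification_change(current, wished_type, wished_details):
    """current is None or a (type, details) tuple; returns the action name."""
    if current is None:
        return 'create'
    current_type, current_details = current
    if current_type != wished_type:
        # The type cannot be patched in place: delete and recreate.
        return 'replace'
    if current_details != wished_details:
        return 'update'
    return 'noop'

assert plan_notification_change(None, 'email', {}) == 'create'
assert plan_notification_change(('email', {'address': 'a@b'}), 'pagerduty', {}) == 'replace'
assert plan_notification_change(('email', {'address': 'a@b'}), 'email', {'address': 'c@d'}) == 'update'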
- ok_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is OK. Must be an array of - valid rax_mon_notification ids. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Example notification plan - gather_facts: False - hosts: local - connection: local - tasks: - - name: Establish who gets called when. - community.general.rax_mon_notification_plan: - credentials: ~/.rax_pub - state: present - label: defcon1 - critical_state: - - "{{ everyone['notification']['id'] }}" - warning_state: - - "{{ opsfloor['notification']['id'] }}" - register: defcon1 -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def notification_plan(module, state, label, critical_state, warning_state, ok_state): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - notification_plan = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for n in cm.list_notification_plans(): - if n.label == label: - existing.append(n) - - if existing: - notification_plan = existing[0] - - if state == 'present': - should_create = False - should_delete = False - - if len(existing) > 1: - module.fail_json(msg='%s notification plans are labelled %s.' % - (len(existing), label)) - - if notification_plan: - should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ - (warning_state and warning_state != notification_plan.warning_state) or \ - (ok_state and ok_state != notification_plan.ok_state) - - if should_delete: - notification_plan.delete() - should_create = True - else: - should_create = True - - if should_create: - notification_plan = cm.create_notification_plan(label=label, - critical_state=critical_state, - warning_state=warning_state, - ok_state=ok_state) - changed = True - else: - for np in existing: - np.delete() - changed = True - - if notification_plan: - notification_plan_dict = { - "id": notification_plan.id, - "critical_state": notification_plan.critical_state, - "warning_state": notification_plan.warning_state, - "ok_state": notification_plan.ok_state, - "metadata": notification_plan.metadata - } - module.exit_json(changed=changed, notification_plan=notification_plan_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - critical_state=dict(type='list', elements='str'), - warning_state=dict(type='list', elements='str'), - ok_state=dict(type='list', elements='str'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - critical_state = module.params.get('critical_state') - warning_state = module.params.get('warning_state') - ok_state = module.params.get('ok_state') - - 
setup_rax_module(module, pyrax) - - notification_plan(module, state, label, critical_state, warning_state, ok_state) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py deleted file mode 100644 index 146c08c8..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_network -short_description: create / delete an isolated network in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud isolated network. -options: - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - label: - type: str - description: - - Label (name) to give the network - required: yes - cidr: - type: str - description: - - cidr of the network being created -author: - - "Christopher H. Laco (@claco)" - - "Jesse Keating (@omgjlk)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build an Isolated Network - gather_facts: False - - tasks: - - name: Network create request - local_action: - module: rax_network - credentials: ~/.raxpub - label: my-net - cidr: 192.168.3.0/24 - state: present -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_network(module, state, label, cidr): - changed = False - network = None - networks = [] - - if not pyrax.cloud_networks: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not cidr: - module.fail_json(msg='missing required arguments: cidr') - - try: - network = pyrax.cloud_networks.find_network_by_label(label) - except pyrax.exceptions.NetworkNotFound: - try: - network = pyrax.cloud_networks.create(label, cidr=cidr) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - network = pyrax.cloud_networks.find_network_by_label(label) - network.delete() - changed = True - except pyrax.exceptions.NetworkNotFound: - pass - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if network: - instance = dict(id=network.id, - label=network.label, - cidr=network.cidr) - networks.append(instance) - - module.exit_json(changed=changed, networks=networks) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', - choices=['present', 'absent']), - label=dict(required=True), - cidr=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - cidr = module.params.get('cidr') - - setup_rax_module(module, pyrax) - - cloud_network(module, state, label, cidr) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py deleted file mode 100644 index 46c942c7..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_queue -short_description: create / delete a queue in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud queue. -options: - name: - type: str - description: - - Name to give the queue - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Queue - gather_facts: False - hosts: local - connection: local - tasks: - - name: Queue create request - local_action: - module: rax_queue - credentials: ~/.raxpub - name: my-queue - region: DFW - state: present - register: my_queue -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_queue(module, state, name): - for arg in (state, name): - if not arg: - module.fail_json(msg='%s is required for rax_queue' % arg) - - changed = False - queues = [] - instance = {} - - cq = pyrax.queues - if not cq: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - for queue in cq.list(): - if name != queue.name: - continue - - queues.append(queue) - - if len(queues) > 1: - module.fail_json(msg='Multiple Queues were matched by name') - - if state == 'present': - if not queues: - try: - queue = cq.create(name) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - queue = queues[0] - - instance = dict(name=queue.name) - result = dict(changed=changed, queue=instance) - module.exit_json(**result) - - elif state == 'absent': - if queues: - queue = queues[0] - try: - queue.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, queue=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_queue(module, state, name) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py deleted file mode 100644 index 4080e4c6..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py +++ /dev/null @@ -1,441 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_scaling_group -short_description: Manipulate Rackspace Cloud Autoscale Groups -description: - - Manipulate Rackspace Cloud Autoscale Groups -options: - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - type: bool - default: 'no' - cooldown: - type: int - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). 
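rax_network and rax_queue both follow a find-or-create pattern: look the resource up, and only create (and report changed) when the lookup misses. A self-contained sketch of that pattern; NetworkNotFound and FakeNetworks are stand-ins for the pyrax equivalents, not real API.

class NetworkNotFound(Exception):
    pass

class FakeNetworks:
    """Illustrative stand-in for a client such as pyrax.cloud_networks."""
    def __init__(self):
        self._nets = {}
    def find_network_by_label(self, label):
        try:
            return self._nets[label]
        except KeyError:
            raise NetworkNotFound(label)
    def create(self, label, cidr):
        self._nets[label] = {'label': label, 'cidr': cidr}
        return self._nets[label]

def ensure_network(client, label, cidr):
    """Find-or-create; returns (network, changed) like the module's changed flag."""
    try:
        return client.find_network_by_label(label), False
    except NetworkNotFound:
        return client.create(label, cidr=cidr), True

nets = FakeNetworks()
net, changed = ensure_network(nets, 'my-net', '192.168.3.0/24')
assert changed is True
net, changed = ensure_network(nets, 'my-net', '192.168.3.0/24')
assert changed is False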
- default: 300 - disk_config: - type: str - description: - - Disk partitioning strategy - - If not specified, it will fallback to C(auto). - choices: - - auto - - manual - files: - type: dict - description: - - 'Files to insert into the instance. Hash of C(remotepath: localpath)' - flavor: - type: str - description: - - flavor to use for the instance - required: true - image: - type: str - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name) - required: true - key_name: - type: str - description: - - key pair to use on the instance - loadbalancers: - type: list - elements: dict - description: - - List of load balancer C(id) and C(port) hashes - max_entities: - type: int - description: - - The maximum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - meta: - type: dict - description: - - A hash of metadata to associate with the instance - min_entities: - type: int - description: - - The minimum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - name: - type: str - description: - - Name to give the scaling group - required: true - networks: - type: list - elements: str - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - server_name: - type: str - description: - - The base name for servers created by Autoscale - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - type: str - description: - - Data to be uploaded to the servers config drive. This option implies - I(config_drive). 
Can be a file path or a string - wait: - description: - - wait for the scaling group to finish provisioning the minimum amount of - servers - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - community.general.rax_scaling_group: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - flavor: performance1-1 - image: bb02b1a3-bc77-4d17-ab5b-421d89850fca - min_entities: 5 - max_entities: 10 - name: ASG Test - server_name: asgtest - loadbalancers: - - id: 228385 - port: 80 - register: asg -''' - -import base64 -import json -import os -import time - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network, - rax_required_together, rax_to_dict, setup_rax_module) -from ansible.module_utils.six import string_types - - -def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None, - image=None, key_name=None, loadbalancers=None, meta=None, - min_entities=0, max_entities=0, name=None, networks=None, - server_name=None, state='present', user_data=None, - config_drive=False, wait=True, wait_timeout=300): - files = {} if files is None else files - loadbalancers = [] if loadbalancers is None else loadbalancers - meta = {} if meta is None else meta - networks = [] if networks is None else networks - - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate clients. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if user_data: - config_drive = True - - if user_data and os.path.isfile(user_data): - try: - f = open(user_data) - user_data = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % user_data) - - if state == 'present': - # Normalize and ensure all metadata values are strings - if meta: - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - for nic in nics: - # pyrax is currently returning net-id, but we need uuid - # this check makes this forward compatible for a time when - # pyrax uses uuid instead - if nic.get('net-id'): - nic.update(uuid=nic['net-id']) - del nic['net-id'] - - # Handle the file contents - personality = [] - if files: - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - f = open(lpath, 'r') - personality.append({ - 'path': rpath, - 'contents': f.read() - }) - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % lpath) - - lbs = [] - if loadbalancers: - for lb in loadbalancers: - try: - lb_id = int(lb.get('id')) - except (ValueError, TypeError): - module.fail_json(msg='Load balancer ID is not an integer: ' - '%s' % lb.get('id')) - try: - port = int(lb.get('port')) - except (ValueError, TypeError): - module.fail_json(msg='Load balancer port is not an ' - 'integer: %s' % lb.get('port')) - if not lb_id or not port: - continue - lbs.append((lb_id, port)) - - try: - sg = au.find(name=name) - except pyrax.exceptions.NoUniqueMatch as e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - sg = au.create(name, cooldown=cooldown, - min_entities=min_entities, - max_entities=max_entities, - launch_config_type='launch_server', - server_name=server_name, image=image, - flavor=flavor, disk_config=disk_config, - metadata=meta, personality=personality, - networks=nics, load_balancers=lbs, - key_name=key_name, config_drive=config_drive, - user_data=user_data) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if not changed: - # Scaling Group Updates - group_args = {} - if cooldown != sg.cooldown: - group_args['cooldown'] = cooldown - - if min_entities != sg.min_entities: - group_args['min_entities'] = min_entities - - if max_entities != sg.max_entities: - group_args['max_entities'] = max_entities - - if group_args: - changed = True - sg.update(**group_args) - - # Launch Configuration Updates - lc = sg.get_launch_config() - lc_args = {} - if server_name != lc.get('name'): - lc_args['server_name'] = server_name - - if image != lc.get('image'): - lc_args['image'] = image - - if flavor != lc.get('flavor'): - lc_args['flavor'] = flavor - - disk_config = disk_config or 'AUTO' - if ((disk_config or lc.get('disk_config')) and - disk_config != lc.get('disk_config', 'AUTO')): - lc_args['disk_config'] = disk_config - - if (meta or lc.get('meta')) and meta != lc.get('metadata'): - lc_args['metadata'] = meta - - test_personality = [] - for p in personality: - test_personality.append({ - 'path': p['path'], - 'contents': base64.b64encode(p['contents']) - }) - if ((test_personality or lc.get('personality')) and - 
test_personality != lc.get('personality')): - lc_args['personality'] = personality - - if nics != lc.get('networks'): - lc_args['networks'] = nics - - if lbs != lc.get('load_balancers'): - # Work around for https://github.com/rackspace/pyrax/pull/393 - lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs) - - if key_name != lc.get('key_name'): - lc_args['key_name'] = key_name - - if config_drive != lc.get('config_drive', False): - lc_args['config_drive'] = config_drive - - if (user_data and - base64.b64encode(user_data) != lc.get('user_data')): - lc_args['user_data'] = user_data - - if lc_args: - # Work around for https://github.com/rackspace/pyrax/pull/389 - if 'flavor' not in lc_args: - lc_args['flavor'] = lc.get('flavor') - changed = True - sg.update_launch_config(**lc_args) - - sg.get() - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - state = sg.get_state() - if state["pending_capacity"] == 0: - break - - time.sleep(5) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - else: - try: - sg = au.find(name=name) - sg.delete() - changed = True - except pyrax.exceptions.NotFound as e: - sg = {} - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - config_drive=dict(default=False, type='bool'), - cooldown=dict(type='int', default=300), - disk_config=dict(choices=['auto', 'manual']), - files=dict(type='dict', default={}), - flavor=dict(required=True), - image=dict(required=True), - key_name=dict(), - loadbalancers=dict(type='list', elements='dict'), - meta=dict(type='dict', default={}), - min_entities=dict(type='int', required=True), - max_entities=dict(type='int', required=True), - name=dict(required=True), - networks=dict(type='list', elements='str', default=['public', 'private']), - server_name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300, type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - config_drive = module.params.get('config_drive') - cooldown = module.params.get('cooldown') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - files = module.params.get('files') - flavor = module.params.get('flavor') - image = module.params.get('image') - key_name = module.params.get('key_name') - loadbalancers = module.params.get('loadbalancers') - meta = module.params.get('meta') - min_entities = module.params.get('min_entities') - max_entities = module.params.get('max_entities') - name = module.params.get('name') - networks = module.params.get('networks') - server_name = module.params.get('server_name') - state = module.params.get('state') - user_data = module.params.get('user_data') - - if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: - module.fail_json(msg='min_entities and max_entities must be an ' - 'integer between 0 and 1000') - - if not 0 <= cooldown <= 86400: - module.fail_json(msg='cooldown must be an integer between 0 and 86400') - - setup_rax_module(module, pyrax) - - rax_asg(module, cooldown=cooldown, disk_config=disk_config, - files=files, 
flavor=flavor, image=image, meta=meta, - key_name=key_name, loadbalancers=loadbalancers, - min_entities=min_entities, max_entities=max_entities, - name=name, networks=networks, server_name=server_name, - state=state, config_drive=config_drive, user_data=user_data) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py deleted file mode 100644 index be46bd62..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_scaling_policy -short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy -description: - - Manipulate Rackspace Cloud Autoscale Scaling Policy -options: - at: - type: str - description: - - The UTC time when this policy will be executed. The time must be - formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as - C(2013-05-19T08:07:08Z) - change: - type: int - description: - - The change, either as a number of servers or as a percentage, to make - in the scaling group. If this is a percentage, you must set - I(is_percent) to C(true) also. - cron: - type: str - description: - - The time when the policy will be executed, as a cron entry. For - example, if this is parameter is set to C(1 0 * * *) - cooldown: - type: int - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - default: 300 - desired_capacity: - type: int - description: - - The desired server capacity of the scaling the group; that is, how - many servers should be in the scaling group. - is_percent: - description: - - Whether the value in I(change) is a percent value - default: false - type: bool - name: - type: str - description: - - Name to give the policy - required: true - policy_type: - type: str - description: - - The type of policy that will be executed for the current release. 
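When the scaling group already exists, the module above builds a sparse update payload: only attributes whose wished value differs from the live one are passed to update() and update_launch_config(). The core comparison, sketched without pyrax:

def sparse_update(current, wished):
    """Collect only the keys whose wished value differs from the current one."""
    return {k: v for k, v in wished.items() if current.get(k) != v}

current = {'cooldown': 300, 'min_entities': 5, 'max_entities': 10}
wished = {'cooldown': 300, 'min_entities': 2, 'max_entities': 10}
assert sparse_update(current, wished) == {'min_entities': 2}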
- choices: - - webhook - - schedule - required: true - scaling_group: - type: str - description: - - Name of the scaling group that this policy will be added to - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - at: '2013-05-19T08:07:08Z' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - at - policy_type: schedule - scaling_group: ASG Test - register: asps_at - - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cron: '1 0 * * *' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - cron - policy_type: schedule - scaling_group: ASG Test - register: asp_cron - - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - desired_capacity: 5 - name: ASG Test Policy - webhook - policy_type: webhook - scaling_group: ASG Test - register: asp_webhook -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict, - setup_rax_module) - - -def rax_asp(module, at=None, change=0, cron=None, cooldown=300, - desired_capacity=0, is_percent=False, name=None, - policy_type=None, scaling_group=None, state='present'): - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - UUID(scaling_group) - except ValueError: - try: - sg = au.find(name=scaling_group) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - try: - sg = au.get(scaling_group) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if state == 'present': - policies = [p for p in sg.list_policies() if name == p.name] - if len(policies) > 1: - module.fail_json(msg='No unique policy match found by name') - if at: - args = dict(at=at) - elif cron: - args = dict(cron=cron) - else: - args = None - - if not policies: - try: - policy = sg.add_policy(name, policy_type=policy_type, - cooldown=cooldown, change=change, - is_percent=is_percent, - desired_capacity=desired_capacity, - args=args) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - else: - policy = policies[0] - kwargs = {} - if policy_type != policy.type: - kwargs['policy_type'] = policy_type - - if cooldown != policy.cooldown: - kwargs['cooldown'] = cooldown - - if hasattr(policy, 'change') and change != policy.change: - kwargs['change'] = change - - if hasattr(policy, 'changePercent') and is_percent is False: - kwargs['change'] = change - kwargs['is_percent'] = False - elif hasattr(policy, 'change') and is_percent is True: - kwargs['change'] = change - kwargs['is_percent'] = True - - if hasattr(policy, 'desiredCapacity') and change: - kwargs['change'] = change - elif ((hasattr(policy, 'change') or - hasattr(policy, 'changePercent')) and desired_capacity): - kwargs['desired_capacity'] = desired_capacity - - if hasattr(policy, 'args') and args != policy.args: - kwargs['args'] = args - - if kwargs: - policy.update(**kwargs) - changed = True - - policy.get() - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - else: - try: - policies = [p for p in sg.list_policies() if name == p.name] - if len(policies) > 1: - module.fail_json(msg='No unique policy match found by name') - elif not policies: - policy = {} - else: - policy = policies[0] - policy.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - at=dict(), - change=dict(type='int'), - cron=dict(), - cooldown=dict(type='int', default=300), - desired_capacity=dict(type='int'), - is_percent=dict(type='bool', default=False), - name=dict(required=True), - policy_type=dict(required=True, choices=['webhook', 'schedule']), - scaling_group=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['cron', 'at'], - ['change', 'desired_capacity'], - ] - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - at = module.params.get('at') - change = module.params.get('change') - cron = module.params.get('cron') - cooldown = module.params.get('cooldown') - desired_capacity = module.params.get('desired_capacity') - is_percent = module.params.get('is_percent') - name = module.params.get('name') - policy_type = module.params.get('policy_type') - scaling_group = module.params.get('scaling_group') - state = module.params.get('state') - - if (at or cron) and policy_type == 'webhook': - module.fail_json(msg='policy_type=schedule is 
required for a time ' - 'based policy') - - setup_rax_module(module, pyrax) - - rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, - desired_capacity=desired_capacity, is_percent=is_percent, - name=name, policy_type=policy_type, scaling_group=scaling_group, - state=state) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py deleted file mode 100644 index a195d7fb..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py +++ /dev/null @@ -1,693 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway Compute management module -# -# Copyright (C) 2018 Online SAS. -# https://www.scaleway.com -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_compute -short_description: Scaleway compute management module -author: Remy Leone (@remyleone) -description: - - "This module manages compute instances on Scaleway." -extends_documentation_fragment: -- community.general.scaleway - - -options: - - public_ip: - type: str - description: - - Manage public IP on a Scaleway server - - Could be Scaleway IP address UUID - - C(dynamic) Means that IP is destroyed at the same time the host is destroyed - - C(absent) Means no public IP at all - default: absent - - enable_ipv6: - description: - - Enable public IPv6 connectivity on the instance - default: false - type: bool - - image: - type: str - description: - - Image identifier used to start the instance with - required: true - - name: - type: str - description: - - Name of the instance - - organization: - type: str - description: - - Organization identifier. - - Exactly one of I(project) and I(organization) must be specified. - - project: - type: str - description: - - Project identifier. - - Exactly one of I(project) and I(organization) must be specified. - version_added: 4.3.0 - - state: - type: str - description: - - Indicate desired state of the instance. - default: present - choices: - - present - - absent - - running - - restarted - - stopped - - tags: - type: list - elements: str - description: - - List of tags to apply to the instance (5 max) - required: false - default: [] - - region: - type: str - description: - - Scaleway compute zone - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - commercial_type: - type: str - description: - - Commercial name of the compute node - required: true - - wait: - description: - - Wait for the instance to reach its desired state before returning. 
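For schedule policies, rax_scaling_policy packs exactly one of at or cron into an args dict and passes None for webhook policies; at and cron are also declared mutually exclusive in the argument spec. A sketch of that packing, assuming the same semantics:

def schedule_args(at=None, cron=None):
    """Build the optional args payload for a schedule policy."""
    if at and cron:
        raise ValueError("'at' and 'cron' are mutually exclusive")
    if at:
        return {'at': at}
    if cron:
        return {'cron': cron}
    return None  # webhook policies carry no schedule arguments

assert schedule_args(at='2013-05-19T08:07:08Z') == {'at': '2013-05-19T08:07:08Z'}
assert schedule_args(cron='1 0 * * *') == {'cron': '1 0 * * *'}
assert schedule_args() is None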
- type: bool - default: 'no' - - wait_timeout: - type: int - description: - - Time to wait for the server to reach the expected state - required: false - default: 300 - - wait_sleep_time: - type: int - description: - - Time to wait before every attempt to check the state of the server - required: false - default: 3 - - security_group: - type: str - description: - - Security group unique identifier - - If no value provided, the default security group or current security group will be used - required: false -''' - -EXAMPLES = ''' -- name: Create a server - community.general.scaleway_compute: - name: foobar - state: present - image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe - project: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: ams1 - commercial_type: VC1S - tags: - - test - - www - -- name: Create a server attached to a security group - community.general.scaleway_compute: - name: foobar - state: present - image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe - project: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: ams1 - commercial_type: VC1S - security_group: 4a31b633-118e-4900-bd52-facf1085fc8d - tags: - - test - - www - -- name: Destroy it right after - community.general.scaleway_compute: - name: foobar - state: absent - image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe - project: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: ams1 - commercial_type: VC1S -''' - -RETURN = ''' -''' - -import datetime -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import quote as urlquote -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway - -SCALEWAY_SERVER_STATES = ( - 'stopped', - 'stopping', - 'starting', - 'running', - 'locked' -) - -SCALEWAY_TRANSITIONS_STATES = ( - "stopping", - "starting", - "pending" -) - - -def check_image_id(compute_api, image_id): - response = compute_api.get(path="images/%s" % image_id) - - if not response.ok: - msg = 'Error in getting image %s on %s : %s' % (image_id, compute_api.module.params.get('api_url'), response.json) - compute_api.module.fail_json(msg=msg) - - -def fetch_state(compute_api, server): - compute_api.module.debug("fetch_state of server: %s" % server["id"]) - response = compute_api.get(path="servers/%s" % server["id"]) - - if response.status_code == 404: - return "absent" - - if not response.ok: - msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - try: - compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"])) - return response.json["server"]["state"] - except KeyError: - compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json) - - -def wait_to_complete_state_transition(compute_api, server, wait=None): - if wait is None: - wait = compute_api.module.params["wait"] - if not wait: - return - - wait_timeout = compute_api.module.params["wait_timeout"] - wait_sleep_time = compute_api.module.params["wait_sleep_time"] - - start = datetime.datetime.utcnow() - end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: - compute_api.module.debug("We are going to wait for the server to finish its transition") - if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES: - compute_api.module.debug("It seems that the server is not in transition anymore.") - compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server)) - break 
- time.sleep(wait_sleep_time) - else: - compute_api.module.fail_json(msg="Server takes too long to finish its transition") - - -def public_ip_payload(compute_api, public_ip): - # We don't want a public ip - if public_ip in ("absent",): - return {"dynamic_ip_required": False} - - # IP is only attached to the instance and is released as soon as the instance terminates - if public_ip in ("dynamic", "allocated"): - return {"dynamic_ip_required": True} - - # We check that the IP we want to attach exists, if so its ID is returned - response = compute_api.get("ips") - if not response.ok: - msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - ip_list = [] - try: - ip_list = response.json["ips"] - except KeyError: - compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json) - - lookup = [ip["id"] for ip in ip_list] - if public_ip in lookup: - return {"public_ip": public_ip} - - -def create_server(compute_api, server): - compute_api.module.debug("Starting a create_server") - target_server = None - data = {"enable_ipv6": server["enable_ipv6"], - "tags": server["tags"], - "commercial_type": server["commercial_type"], - "image": server["image"], - "dynamic_ip_required": server["dynamic_ip_required"], - "name": server["name"] - } - - if server["project"]: - data["project"] = server["project"] - - if server["organization"]: - data["organization"] = server["organization"] - - if server["security_group"]: - data["security_group"] = server["security_group"] - - response = compute_api.post(path="servers", data=data) - - if not response.ok: - msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - try: - target_server = response.json["server"] - except KeyError: - compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - - return target_server - - -def restart_server(compute_api, server): - return perform_action(compute_api=compute_api, server=server, action="reboot") - - -def stop_server(compute_api, server): - return perform_action(compute_api=compute_api, server=server, action="poweroff") - - -def start_server(compute_api, server): - return perform_action(compute_api=compute_api, server=server, action="poweron") - - -def perform_action(compute_api, server, action): - response = compute_api.post(path="servers/%s/action" % server["id"], - data={"action": action}) - if not response.ok: - msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - wait_to_complete_state_transition(compute_api=compute_api, server=server) - - return response - - -def remove_server(compute_api, server): - compute_api.module.debug("Starting remove server strategy") - response = compute_api.delete(path="servers/%s" % server["id"]) - if not response.ok: - msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - wait_to_complete_state_transition(compute_api=compute_api, server=server) - - return response - - -def present_strategy(compute_api, wished_server): - compute_api.module.debug("Starting present strategy") - changed = False - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - if not query_results: - changed = True - if 
compute_api.module.check_mode: - return changed, {"status": "A server would be created."} - - target_server = create_server(compute_api=compute_api, server=wished_server) - else: - target_server = query_results[0] - - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s attributes would be changed." % target_server["id"]} - - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) - - return changed, target_server - - -def absent_strategy(compute_api, wished_server): - compute_api.module.debug("Starting absent strategy") - changed = False - target_server = None - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - if not query_results: - return changed, {"status": "Server already absent."} - else: - target_server = query_results[0] - - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s would be made absent." % target_server["id"]} - - # A server MUST be stopped to be deleted. - while fetch_state(compute_api=compute_api, server=target_server) != "stopped": - wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) - response = stop_server(compute_api=compute_api, server=target_server) - - if not response.ok: - err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code, - response.json) - compute_api.module.fail_json(msg=err_msg) - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) - - response = remove_server(compute_api=compute_api, server=target_server) - - if not response.ok: - err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json) - compute_api.module.fail_json(msg=err_msg) - - return changed, {"status": "Server %s deleted" % target_server["id"]} - - -def running_strategy(compute_api, wished_server): - compute_api.module.debug("Starting running strategy") - changed = False - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - if not query_results: - changed = True - if compute_api.module.check_mode: - return changed, {"status": "A server would be created before being run."} - - target_server = create_server(compute_api=compute_api, server=wished_server) - else: - target_server = query_results[0] - - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]} - - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) - - current_state = fetch_state(compute_api=compute_api, server=target_server) - if current_state not in ("running", "starting"): - compute_api.module.debug("running_strategy: Server in state: %s" % current_state) - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s attributes would be changed." 
% target_server["id"]} - - response = start_server(compute_api=compute_api, server=target_server) - if not response.ok: - msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - return changed, target_server - - -def stop_strategy(compute_api, wished_server): - compute_api.module.debug("Starting stop strategy") - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - changed = False - - if not query_results: - - if compute_api.module.check_mode: - return changed, {"status": "A server would be created before being stopped."} - - target_server = create_server(compute_api=compute_api, server=wished_server) - changed = True - else: - target_server = query_results[0] - - compute_api.module.debug("stop_strategy: Servers are found.") - - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): - changed = True - - if compute_api.module.check_mode: - return changed, { - "status": "Server %s attributes would be changed before stopping it." % target_server["id"]} - - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - - current_state = fetch_state(compute_api=compute_api, server=target_server) - if current_state not in ("stopped",): - compute_api.module.debug("stop_strategy: Server in state: %s" % current_state) - - changed = True - - if compute_api.module.check_mode: - return changed, {"status": "Server %s would be stopped." % target_server["id"]} - - response = stop_server(compute_api=compute_api, server=target_server) - compute_api.module.debug(response.json) - compute_api.module.debug(response.ok) - - if not response.ok: - msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - return changed, target_server - - -def restart_strategy(compute_api, wished_server): - compute_api.module.debug("Starting restart strategy") - changed = False - query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) - - if not query_results: - changed = True - if compute_api.module.check_mode: - return changed, {"status": "A server would be created before being rebooted."} - - target_server = create_server(compute_api=compute_api, server=wished_server) - else: - target_server = query_results[0] - - if server_attributes_should_be_changed(compute_api=compute_api, - target_server=target_server, - wished_server=wished_server): - changed = True - - if compute_api.module.check_mode: - return changed, { - "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]} - - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) - - changed = True - if compute_api.module.check_mode: - return changed, {"status": "Server %s would be rebooted." 
% target_server["id"]} - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - - if fetch_state(compute_api=compute_api, server=target_server) in ("running",): - response = restart_server(compute_api=compute_api, server=target_server) - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - if not response.ok: - msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code, - response.json) - compute_api.module.fail_json(msg=msg) - - if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",): - response = restart_server(compute_api=compute_api, server=target_server) - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - if not response.ok: - msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code, - response.json) - compute_api.module.fail_json(msg=msg) - - return changed, target_server - - -state_strategy = { - "present": present_strategy, - "restarted": restart_strategy, - "stopped": stop_strategy, - "running": running_strategy, - "absent": absent_strategy -} - - -def find(compute_api, wished_server, per_page=1): - compute_api.module.debug("Getting inside find") - # Only the name attribute is accepted in the Compute query API - response = compute_api.get("servers", params={"name": wished_server["name"], - "per_page": per_page}) - - if not response.ok: - msg = 'Error during server search: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - search_results = response.json["servers"] - - return search_results - - -PATCH_MUTABLE_SERVER_ATTRIBUTES = ( - "ipv6", - "tags", - "name", - "dynamic_ip_required", - "security_group", -) - - -def server_attributes_should_be_changed(compute_api, target_server, wished_server): - compute_api.module.debug("Checking if server attributes should be changed") - compute_api.module.debug("Current Server: %s" % target_server) - compute_api.module.debug("Wished Server: %s" % wished_server) - debug_dict = dict((x, (target_server[x], wished_server[x])) - for x in PATCH_MUTABLE_SERVER_ATTRIBUTES - if x in target_server and x in wished_server) - compute_api.module.debug("Debug dict %s" % debug_dict) - try: - for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: - if key in target_server and key in wished_server: - # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook - if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys( - ) and target_server[key]["id"] != wished_server[key]: - return True - # Handling other structure compare simply the two objects content - elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]: - return True - return False - except AttributeError: - compute_api.module.fail_json(msg="Error while checking if attributes should be changed") - - -def server_change_attributes(compute_api, target_server, wished_server): - compute_api.module.debug("Starting patching server attributes") - patch_payload = dict() - - for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: - if key in target_server and key in wished_server: - # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook - if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]: - # Setting all key to current value except ID - key_dict = dict((x, target_server[key][x]) for x in 
target_server[key].keys() if x != "id") - # Setting ID to the user specified ID - key_dict["id"] = wished_server[key] - patch_payload[key] = key_dict - elif not isinstance(target_server[key], dict): - patch_payload[key] = wished_server[key] - - response = compute_api.patch(path="servers/%s" % target_server["id"], - data=patch_payload) - if not response.ok: - msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json) - compute_api.module.fail_json(msg=msg) - - try: - target_server = response.json["server"] - except KeyError: - compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) - - wait_to_complete_state_transition(compute_api=compute_api, server=target_server) - - return target_server - - -def core(module): - region = module.params["region"] - wished_server = { - "state": module.params["state"], - "image": module.params["image"], - "name": module.params["name"], - "commercial_type": module.params["commercial_type"], - "enable_ipv6": module.params["enable_ipv6"], - "tags": module.params["tags"], - "organization": module.params["organization"], - "project": module.params["project"], - "security_group": module.params["security_group"] - } - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - compute_api = Scaleway(module=module) - - check_image_id(compute_api, wished_server["image"]) - - # IP parameters of the wished server depends on the configuration - ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"]) - wished_server.update(ip_payload) - - changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server) - module.exit_json(changed=changed, msg=summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - image=dict(required=True), - name=dict(), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - commercial_type=dict(required=True), - enable_ipv6=dict(default=False, type="bool"), - public_ip=dict(default="absent"), - state=dict(choices=list(state_strategy.keys()), default='present'), - tags=dict(type="list", elements="str", default=[]), - organization=dict(), - project=dict(), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=300), - wait_sleep_time=dict(type="int", default=3), - security_group=dict(), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ('organization', 'project'), - ], - required_one_of=[ - ('organization', 'project'), - ], - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py deleted file mode 100644 index 35f35f82..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway database backups management module -# -# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com). 
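Throughout these Scaleway modules the requested state is routed through a dispatch table of strategy functions, each returning a (changed, summary) pair that core() hands to module.exit_json(). A minimal sketch of that pattern, with placeholder strategy bodies rather than the real API calls:

def present_strategy(api, wished):
    # Placeholder body: a real strategy looks the resource up, then
    # creates or patches it and reports whether anything changed.
    return True, {"status": "resource would be created"}


def absent_strategy(api, wished):
    # Placeholder body: a real strategy deletes the resource if found.
    return False, {}


state_strategy = {
    "present": present_strategy,
    "absent": absent_strategy,
}


def core(api, params):
    # Unknown states can never reach this lookup, because AnsibleModule
    # validates state against choices=list(state_strategy.keys()).
    changed, summary = state_strategy[params["state"]](api, params)
    return changed, summary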
-# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_database_backup -short_description: Scaleway database backups management module -version_added: 1.2.0 -author: Guillaume Rodriguez (@guillaume_ro_fr) -description: - - This module manages database backups on Scaleway account U(https://developer.scaleway.com). -extends_documentation_fragment: - - community.general.scaleway -options: - state: - description: - - Indicate desired state of the database backup. - - C(present) creates a backup. - - C(absent) deletes the backup. - - C(exported) creates a download link for the backup. - - C(restored) restores the backup to a new database. - type: str - default: present - choices: - - present - - absent - - exported - - restored - - region: - description: - - Scaleway region to use (for example C(fr-par)). - type: str - required: true - choices: - - fr-par - - nl-ams - - pl-waw - - id: - description: - - UUID used to identify the database backup. - - Required for C(absent), C(exported) and C(restored) states. - type: str - - name: - description: - - Name used to identify the database backup. - - Required for C(present) state. - - Ignored when C(state=absent), C(state=exported) or C(state=restored). - type: str - required: false - - database_name: - description: - - Name used to identify the database. - - Required for C(present) and C(restored) states. - - Ignored when C(state=absent) or C(state=exported). - type: str - required: false - - instance_id: - description: - - UUID of the instance associated to the database backup. - - Required for C(present) and C(restored) states. - - Ignored when C(state=absent) or C(state=exported). - type: str - required: false - - expires_at: - description: - - Expiration datetime of the database backup (ISO 8601 format). - - Ignored when C(state=absent), C(state=exported) or C(state=restored). - type: str - required: false - - wait: - description: - - Wait for the instance to reach its desired state before returning. - type: bool - default: false - - wait_timeout: - description: - - Time to wait for the backup to reach the expected state. - type: int - required: false - default: 300 - - wait_sleep_time: - description: - - Time to wait before every attempt to check the state of the backup. - type: int - required: false - default: 3 -''' - -EXAMPLES = ''' - - name: Create a backup - community.general.scaleway_database_backup: - name: 'my_backup' - state: present - region: 'fr-par' - database_name: 'my-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - - name: Export a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: exported - region: 'fr-par' - - - name: Restore a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: restored - region: 'fr-par' - database_name: 'my-new-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - - name: Remove a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: absent - region: 'fr-par' -''' - -RETURN = ''' -metadata: - description: Backup metadata. 
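The wait, wait_timeout, and wait_sleep_time options documented above all feed the same poll-until-stable loop. A stand-alone sketch of that loop, assuming a fetch_status() callable and a tuple of terminal states (both names are illustrative, not this module's real helpers):

import time

STABLE_STATES = ('ready', 'deleting')


def wait_for_stable_state(fetch_status, wait_timeout=300, wait_sleep_time=3):
    # Poll until fetch_status() reports a terminal state; the while/else
    # form mirrors the deleted module, where the else branch runs only
    # when the deadline expires without a successful return.
    deadline = time.monotonic() + wait_timeout
    while time.monotonic() < deadline:
        status = fetch_status()
        if status in STABLE_STATES:
            return status
        time.sleep(wait_sleep_time)
    else:
        raise TimeoutError('backup takes too long to finish its transition')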
- returned: when C(state=present), C(state=exported) or C(state=restored) - type: dict - sample: { - "metadata": { - "created_at": "2020-08-06T12:42:05.631049Z", - "database_name": "my-database", - "download_url": null, - "download_url_expires_at": null, - "expires_at": null, - "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07", - "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49", - "instance_name": "my-instance", - "name": "backup_name", - "region": "fr-par", - "size": 600000, - "status": "ready", - "updated_at": "2020-08-06T12:42:10.581649Z" - } - } -''' - -import datetime -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - scaleway_argument_spec, - SCALEWAY_REGIONS, -) - -stable_states = ( - 'ready', - 'deleting', -) - - -def wait_to_complete_state_transition(module, account_api, backup=None): - wait_timeout = module.params['wait_timeout'] - wait_sleep_time = module.params['wait_sleep_time'] - - if backup is None or backup['status'] in stable_states: - return backup - - start = datetime.datetime.utcnow() - end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: - module.debug('We are going to wait for the backup to finish its transition') - - response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) - if not response.ok: - module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json)) - break - response_json = response.json - - if response_json['status'] in stable_states: - module.debug('It seems that the backup is not in transition anymore.') - module.debug('Backup in state: %s' % response_json['status']) - return response_json - time.sleep(wait_sleep_time) - else: - module.fail_json(msg='Backup takes too long to finish its transition') - - -def present_strategy(module, account_api, backup): - name = module.params['name'] - database_name = module.params['database_name'] - instance_id = module.params['instance_id'] - expiration_date = module.params['expires_at'] - - if backup is not None: - if (backup['name'] == name or name is None) and ( - backup['expires_at'] == expiration_date or expiration_date is None): - wait_to_complete_state_transition(module, account_api, backup) - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - payload = {} - if name is not None: - payload['name'] = name - if expiration_date is not None: - payload['expires_at'] = expiration_date - - response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']), - payload) - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json)) - - if module.check_mode: - module.exit_json(changed=True) - - payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id} - if expiration_date is not None: - payload['expires_at'] = expiration_date - - response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload) - - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json)) - - -def 
absent_strategy(module, account_api, backup): - if backup is None: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json)) - - -def exported_strategy(module, account_api, backup): - if backup is None: - module.fail_json(msg=('Backup "%s" not found' % module.params['id'])) - - if backup['download_url'] is not None: - module.exit_json(changed=False, metadata=backup) - - if module.check_mode: - module.exit_json(changed=True) - - backup = wait_to_complete_state_transition(module, account_api, backup) - response = account_api.post( - '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {}) - - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json)) - - -def restored_strategy(module, account_api, backup): - if backup is None: - module.fail_json(msg=('Backup "%s" not found' % module.params['id'])) - - database_name = module.params['database_name'] - instance_id = module.params['instance_id'] - - if module.check_mode: - module.exit_json(changed=True) - - backup = wait_to_complete_state_transition(module, account_api, backup) - - payload = {'database_name': database_name, 'instance_id': instance_id} - response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']), - payload) - - if response.ok: - result = wait_to_complete_state_transition(module, account_api, response.json) - module.exit_json(changed=True, metadata=result) - - module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json)) - - -state_strategy = { - 'present': present_strategy, - 'absent': absent_strategy, - 'exported': exported_strategy, - 'restored': restored_strategy, -} - - -def core(module): - state = module.params['state'] - backup_id = module.params['id'] - - account_api = Scaleway(module) - - if backup_id is None: - backup_by_id = None - else: - response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id)) - status_code = response.status_code - backup_json = response.json - backup_by_id = None - if status_code == 404: - backup_by_id = None - elif response.ok: - backup_by_id = backup_json - else: - module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message'])) - - state_strategy[state](module, account_api, backup_by_id) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']), - region=dict(required=True, choices=SCALEWAY_REGIONS), - id=dict(), - name=dict(type='str'), - database_name=dict(required=False), - instance_id=dict(required=False), - expires_at=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - wait_sleep_time=dict(type='int', default=3), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_together=[ - 
['database_name', 'instance_id'], - ], - required_if=[ - ['state', 'present', ['name', 'database_name', 'instance_id']], - ['state', 'absent', ['id']], - ['state', 'exported', ['id']], - ['state', 'restored', ['id', 'database_name', 'instance_id']], - ], - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py deleted file mode 100644 index 98aa453f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_image_info -short_description: Gather information about the Scaleway images available. -description: - - Gather information about the Scaleway images available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway compute zone - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway images information - community.general.scaleway_image_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_image_info }}" -''' - -RETURN = r''' ---- -scaleway_image_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
- returned: success - type: list - elements: dict - sample: - "scaleway_image_info": [ - { - "arch": "x86_64", - "creation_date": "2018-07-17T16:18:49.276456+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": false, - "dtb": "", - "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.9.93 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", - "modification_date": "2018-07-17T16:42:06.319315+00:00", - "name": "Debian Stretch", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", - "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) - - -class ScalewayImageInfo(Scaleway): - - def __init__(self, module): - super(ScalewayImageInfo, self).__init__(module) - self.name = 'images' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_image_info=ScalewayImageInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py deleted file mode 100644 index 7901aaad..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway IP management module -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_ip -short_description: Scaleway IP management module -author: Remy Leone (@remyleone) -description: - - This module manages IP on Scaleway account - U(https://developer.scaleway.com) -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - type: str - description: - - Indicate desired state of the IP. - default: present - choices: - - present - - absent - - organization: - type: str - description: - - Scaleway organization identifier - required: true - - region: - type: str - description: - - Scaleway region to use (for example par1). 
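scaleway_image_info above is representative of all the *_info modules deleted in this change: a thin subclass that names the resource collection, resolves the regional endpoint, and dumps get_resources(). A condensed sketch of that shape; ScalewayThingInfo and the resource name 'things' are hypothetical stand-ins, while the imports match the deleted modules:

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.scaleway import (
    Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)


class ScalewayThingInfo(Scaleway):

    def __init__(self, module):
        super(ScalewayThingInfo, self).__init__(module)
        self.name = 'things'  # the API collection to list, e.g. 'images' or 'ips'
        region = module.params['region']
        self.module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']


def main():
    argument_spec = scaleway_argument_spec()
    argument_spec.update(dict(
        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    try:
        module.exit_json(scaleway_thing_info=ScalewayThingInfo(module).get_resources())
    except ScalewayException as exc:
        module.fail_json(msg=exc.message)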
- required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - id: - type: str - description: - - id of the Scaleway IP (UUID) - - server: - type: str - description: - - id of the server you want to attach an IP to. - - To unattach an IP don't specify this option - - reverse: - type: str - description: - - Reverse to assign to the IP -''' - -EXAMPLES = ''' -- name: Create an IP - community.general.scaleway_ip: - organization: '{{ scw_org }}' - state: present - region: par1 - register: ip_creation_task - -- name: Make sure IP deleted - community.general.scaleway_ip: - id: '{{ ip_creation_task.scaleway_ip.id }}' - state: absent - region: par1 -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "ips": [ - { - "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", - "reverse": null, - "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", - "server": { - "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", - "name": "ansible_tuto-1" - }, - "address": "212.47.232.136" - } - ] - } -''' - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway -from ansible.module_utils.basic import AnsibleModule - - -def ip_attributes_should_be_changed(api, target_ip, wished_ip): - patch_payload = {} - - if target_ip["reverse"] != wished_ip["reverse"]: - patch_payload["reverse"] = wished_ip["reverse"] - - # IP is assigned to a server - if target_ip["server"] is None and wished_ip["server"]: - patch_payload["server"] = wished_ip["server"] - - # IP is unassigned to a server - try: - if target_ip["server"]["id"] and wished_ip["server"] is None: - patch_payload["server"] = wished_ip["server"] - except (TypeError, KeyError): - pass - - # IP is migrated between 2 different servers - try: - if target_ip["server"]["id"] != wished_ip["server"]: - patch_payload["server"] = wished_ip["server"] - except (TypeError, KeyError): - pass - - return patch_payload - - -def payload_from_wished_ip(wished_ip): - return dict( - (k, v) - for k, v in wished_ip.items() - if k != 'id' and v is not None - ) - - -def present_strategy(api, wished_ip): - changed = False - - response = api.get('ips') - if not response.ok: - api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( - response.status_code, response.json['message'])) - - ips_list = response.json["ips"] - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) - - if wished_ip["id"] not in ip_lookup.keys(): - changed = True - if api.module.check_mode: - return changed, {"status": "An IP would be created."} - - # Create IP - creation_response = api.post('/ips', - data=payload_from_wished_ip(wished_ip)) - - if not creation_response.ok: - msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'], - creation_response.json['message'], - creation_response.json) - api.module.fail_json(msg=msg) - return changed, creation_response.json["ip"] - - target_ip = ip_lookup[wished_ip["id"]] - patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip) - - if not patch_payload: - return changed, target_ip - - changed = True - if api.module.check_mode: - return changed, {"status": "IP attributes would be changed."} - - ip_patch_response = api.patch(path="ips/%s" % target_ip["id"], - data=patch_payload) - - if not ip_patch_response.ok: - api.module.fail_json(msg='Error during IP attributes update: [{0}: 
{1}]'.format( - ip_patch_response.status_code, ip_patch_response.json['message'])) - - return changed, ip_patch_response.json["ip"] - - -def absent_strategy(api, wished_ip): - response = api.get('ips') - changed = False - - status_code = response.status_code - ips_json = response.json - ips_list = ips_json["ips"] - - if not response.ok: - api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( - status_code, response.json['message'])) - - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) - if wished_ip["id"] not in ip_lookup.keys(): - return changed, {} - - changed = True - if api.module.check_mode: - return changed, {"status": "IP would be destroyed"} - - response = api.delete('/ips/' + wished_ip["id"]) - if not response.ok: - api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format( - response.status_code, response.json)) - - return changed, response.json - - -def core(module): - wished_ip = { - "organization": module.params['organization'], - "reverse": module.params["reverse"], - "id": module.params["id"], - "server": module.params["server"] - } - - region = module.params["region"] - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - api = Scaleway(module=module) - if module.params["state"] == "absent": - changed, summary = absent_strategy(api=api, wished_ip=wished_ip) - else: - changed, summary = present_strategy(api=api, wished_ip=wished_ip) - module.exit_json(changed=changed, scaleway_ip=summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - organization=dict(required=True), - server=dict(), - reverse=dict(), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - id=dict() - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py deleted file mode 100644 index 189ee1cf..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_ip_info -short_description: Gather information about the Scaleway ips available. -description: - - Gather information about the Scaleway ips available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway ips information - community.general.scaleway_ip_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_ip_info }}" -''' - -RETURN = r''' ---- -scaleway_ip_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
- returned: success - type: list - elements: dict - sample: - "scaleway_ip_info": [ - { - "address": "163.172.170.243", - "id": "ea081794-a581-8899-8451-386ddaf0a451", - "organization": "3f709602-5e6c-4619-b80c-e324324324af", - "reverse": null, - "server": { - "id": "12f19bc7-109c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewayIpInfo(Scaleway): - - def __init__(self, module): - super(ScalewayIpInfo, self).__init__(module) - self.name = 'ips' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_ip_info=ScalewayIpInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py deleted file mode 100644 index 2112ae44..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py +++ /dev/null @@ -1,358 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway Load-balancer management module -# -# Copyright (C) 2018 Online SAS. -# https://www.scaleway.com -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_lb -short_description: Scaleway load-balancer management module -author: Remy Leone (@remyleone) -description: - - "This module manages load-balancers on Scaleway." -extends_documentation_fragment: -- community.general.scaleway - - -options: - - name: - type: str - description: - - Name of the load-balancer - required: true - - description: - type: str - description: - - Description of the load-balancer - required: true - - organization_id: - type: str - description: - - Organization identifier - required: true - - state: - type: str - description: - - Indicate desired state of the instance. - default: present - choices: - - present - - absent - - region: - type: str - description: - - Scaleway zone - required: true - choices: - - nl-ams - - fr-par - - pl-waw - - tags: - type: list - elements: str - description: - - List of tags to apply to the load-balancer - - wait: - description: - - Wait for the load-balancer to reach its desired state before returning. 
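The present_strategy in scaleway_ip above follows the collection's standard idempotency recipe: list the resources, look the wished one up, create it when missing, otherwise compute a patch payload and apply it only when non-empty, short-circuiting in check mode at each mutation point. A generic sketch of that flow; the four callables are injected stand-ins for each module's real lookup/create/diff/patch helpers:

def present_strategy(api, wished, lookup, create, diff_attributes, patch):
    # Returns (changed, summary), like the strategies in the deleted modules.
    existing = lookup(api, wished)
    if existing is None:
        if api.module.check_mode:
            return True, {'status': 'resource would be created'}
        return True, create(api, wished)

    payload = diff_attributes(existing, wished)
    if not payload:
        # Already converged: report no change and return the live resource.
        return False, existing

    if api.module.check_mode:
        return True, {'status': 'resource attributes would be changed'}
    return True, patch(api, existing['id'], payload)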
- type: bool - default: 'no' - - wait_timeout: - type: int - description: - - Time to wait for the load-balancer to reach the expected state - required: false - default: 300 - - wait_sleep_time: - type: int - description: - - Time to wait before every attempt to check the state of the load-balancer - required: false - default: 3 -''' - -EXAMPLES = ''' -- name: Create a load-balancer - community.general.scaleway_lb: - name: foobar - state: present - organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: fr-par - tags: - - hello - -- name: Delete a load-balancer - community.general.scaleway_lb: - name: foobar - state: absent - organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 - region: fr-par -''' - -RETURNS = ''' -{ - "scaleway_lb": { - "backend_count": 0, - "frontend_count": 0, - "description": "Description of my load-balancer", - "id": "00000000-0000-0000-0000-000000000000", - "instances": [ - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "10.0.0.1", - "region": "fr-par", - "status": "ready" - }, - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "10.0.0.2", - "region": "fr-par", - "status": "ready" - } - ], - "ip": [ - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "192.168.0.1", - "lb_id": "00000000-0000-0000-0000-000000000000", - "region": "fr-par", - "organization_id": "00000000-0000-0000-0000-000000000000", - "reverse": "" - } - ], - "name": "lb_ansible_test", - "organization_id": "00000000-0000-0000-0000-000000000000", - "region": "fr-par", - "status": "ready", - "tags": [ - "first_tag", - "second_tag" - ] - } -} -''' - -import datetime -import time -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway - -STABLE_STATES = ( - "ready", - "absent" -) - -MUTABLE_ATTRIBUTES = ( - "name", - "description" -) - - -def payload_from_wished_lb(wished_lb): - return { - "organization_id": wished_lb["organization_id"], - "name": wished_lb["name"], - "tags": wished_lb["tags"], - "description": wished_lb["description"] - } - - -def fetch_state(api, lb): - api.module.debug("fetch_state of load-balancer: %s" % lb["id"]) - response = api.get(path=api.api_path + "/%s" % lb["id"]) - - if response.status_code == 404: - return "absent" - - if not response.ok: - msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) - api.module.fail_json(msg=msg) - - try: - api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"])) - return response.json["status"] - except KeyError: - api.module.fail_json(msg="Could not fetch state in %s" % response.json) - - -def wait_to_complete_state_transition(api, lb, force_wait=False): - wait = api.module.params["wait"] - if not (wait or force_wait): - return - wait_timeout = api.module.params["wait_timeout"] - wait_sleep_time = api.module.params["wait_sleep_time"] - - start = datetime.datetime.utcnow() - end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: - api.module.debug("We are going to wait for the load-balancer to finish its transition") - state = fetch_state(api, lb) - if state in STABLE_STATES: - api.module.debug("It seems that the load-balancer is not in transition anymore.") - api.module.debug("load-balancer in state: %s" % fetch_state(api, lb)) - break - time.sleep(wait_sleep_time) - else: - api.module.fail_json(msg="Server takes too long to finish its 
transition") - - -def lb_attributes_should_be_changed(target_lb, wished_lb): - diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]) - - if diff: - return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES) - else: - return diff - - -def present_strategy(api, wished_lb): - changed = False - - response = api.get(path=api.api_path) - if not response.ok: - api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( - response.status_code, response.json['message'])) - - lbs_list = response.json["lbs"] - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) - - if wished_lb["name"] not in lb_lookup.keys(): - changed = True - if api.module.check_mode: - return changed, {"status": "A load-balancer would be created."} - - # Create Load-balancer - api.warn(payload_from_wished_lb(wished_lb)) - creation_response = api.post(path=api.api_path, - data=payload_from_wished_lb(wished_lb)) - - if not creation_response.ok: - msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'], - creation_response.json['message'], - creation_response.json) - api.module.fail_json(msg=msg) - - wait_to_complete_state_transition(api=api, lb=creation_response.json) - response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) - return changed, response.json - - target_lb = lb_lookup[wished_lb["name"]] - patch_payload = lb_attributes_should_be_changed(target_lb=target_lb, - wished_lb=wished_lb) - - if not patch_payload: - return changed, target_lb - - changed = True - if api.module.check_mode: - return changed, {"status": "Load-balancer attributes would be changed."} - - lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"], - data=patch_payload) - - if not lb_patch_response.ok: - api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format( - lb_patch_response.status_code, lb_patch_response.json['message'])) - - wait_to_complete_state_transition(api=api, lb=target_lb) - return changed, lb_patch_response.json - - -def absent_strategy(api, wished_lb): - response = api.get(path=api.api_path) - changed = False - - status_code = response.status_code - lbs_json = response.json - lbs_list = lbs_json["lbs"] - - if not response.ok: - api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( - status_code, response.json['message'])) - - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) - if wished_lb["name"] not in lb_lookup.keys(): - return changed, {} - - target_lb = lb_lookup[wished_lb["name"]] - changed = True - if api.module.check_mode: - return changed, {"status": "Load-balancer would be destroyed"} - - wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True) - response = api.delete(path=api.api_path + "/%s" % target_lb["id"]) - if not response.ok: - api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format( - response.status_code, response.json)) - - wait_to_complete_state_transition(api=api, lb=target_lb) - return changed, response.json - - -state_strategy = { - "present": present_strategy, - "absent": absent_strategy -} - - -def core(module): - region = module.params["region"] - wished_load_balancer = { - "state": module.params["state"], - "name": module.params["name"], - "description": module.params["description"], - "tags": module.params["tags"], - "organization_id": module.params["organization_id"] - } - module.params['api_url'] = SCALEWAY_ENDPOINT - api = Scaleway(module=module) - api.api_path = 
"lb/v1/regions/%s/lbs" % region - - changed, summary = state_strategy[wished_load_balancer["state"]](api=api, - wished_lb=wished_load_balancer) - module.exit_json(changed=changed, scaleway_lb=summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - name=dict(required=True), - description=dict(required=True), - region=dict(required=True, choices=SCALEWAY_REGIONS), - state=dict(choices=list(state_strategy.keys()), default='present'), - tags=dict(type="list", elements="str", default=[]), - organization_id=dict(required=True), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=300), - wait_sleep_time=dict(type="int", default=3), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py deleted file mode 100644 index a09d1bb5..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_organization_info -short_description: Gather information about the Scaleway organizations available. -description: - - Gather information about the Scaleway organizations available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -options: - api_url: - description: - - Scaleway API URL - default: 'https://account.scaleway.com' - aliases: ['base_url'] -extends_documentation_fragment: -- community.general.scaleway - -''' - -EXAMPLES = r''' -- name: Gather Scaleway organizations information - community.general.scaleway_organization_info: - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_organization_info }}" -''' - -RETURN = r''' ---- -scaleway_organization_info: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_organization_info": [ - { - "address_city_name": "Paris", - "address_country_code": "FR", - "address_line1": "42 Rue de l'univers", - "address_line2": null, - "address_postal_code": "75042", - "address_subdivision_code": "FR-75", - "creation_date": "2018-08-06T13:43:28.508575+00:00", - "currency": "EUR", - "customer_class": "individual", - "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", - "locale": "fr_FR", - "modification_date": "2018-08-06T14:56:41.401685+00:00", - "name": "James Bond", - "support_id": "694324", - "support_level": "basic", - "support_pin": "9324", - "users": [], - "vat_number": null, - "warnings": [] - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec -) - - -class ScalewayOrganizationInfo(Scaleway): - - def __init__(self, module): - super(ScalewayOrganizationInfo, self).__init__(module) - self.name = 'organizations' - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), 
default='https://account.scaleway.com', aliases=['base_url']), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_private_network.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_private_network.py deleted file mode 100644 index 996a3cce..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_private_network.py +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway VPC management module -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_private_network -short_description: Scaleway private network management -version_added: 4.5.0 -author: Pascal MANGIN (@pastral) -description: - - This module manages private network on Scaleway account - (U(https://developer.scaleway.com)). -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - type: str - description: - - Indicate desired state of the VPC. - default: present - choices: - - present - - absent - - project: - type: str - description: - - Project identifier. - required: true - - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - name: - type: str - description: - - Name of the VPC. - - tags: - type: list - elements: str - description: - - List of tags to apply to the instance. - default: [] - -''' - -EXAMPLES = ''' -- name: Create an private network - community.general.scaleway_vpc: - project: '{{ scw_project }}' - name: 'vpc_one' - state: present - region: par1 - register: vpc_creation_task - -- name: Make sure private network with name 'foo' is deleted in region par1 - community.general.scaleway_vpc: - name: 'foo' - state: absent - region: par1 -''' - -RETURN = ''' -scaleway_private_network: - description: Information on the VPC. 
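scaleway_organization_info above is the one module that overrides api_url, using env_fallback so the value is resolved from the explicit task parameter first, then the SCW_API_URL environment variable, then the hard-coded default. That resolution order comes entirely from standard AnsibleModule argument handling:

from ansible.module_utils.basic import AnsibleModule, env_fallback

module = AnsibleModule(
    argument_spec=dict(
        # Explicit task parameter wins, then $SCW_API_URL, then the default.
        api_url=dict(
            type='str',
            fallback=(env_fallback, ['SCW_API_URL']),
            default='https://account.scaleway.com',
            aliases=['base_url'],
        ),
    ),
)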
- returned: success when C(state=present) - type: dict - sample: - { - "created_at": "2022-01-15T11:11:12.676445Z", - "id": "12345678-f1e6-40ec-83e5-12345d67ed89", - "name": "network", - "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "tags": [ - "tag1", - "tag2", - "tag3", - "tag4", - "tag5" - ], - "updated_at": "2022-01-15T11:12:04.624837Z", - "zone": "fr-par-2" - } -''' - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway -from ansible.module_utils.basic import AnsibleModule - - -def get_private_network(api, name, page=1): - page_size = 10 - response = api.get('private-networks', params={'name': name, 'order_by': 'name_asc', 'page': page, 'page_size': page_size}) - if not response.ok: - msg = "Error during get private network creation: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json) - api.module.fail_json(msg=msg) - - if response.json['total_count'] == 0: - return None - - i = 0 - while i < len(response.json['private_networks']): - if response.json['private_networks'][i]['name'] == name: - return response.json['private_networks'][i] - i += 1 - - # search on next page if needed - if (page * page_size) < response.json['total_count']: - return get_private_network(api, name, page + 1) - - return None - - -def present_strategy(api, wished_private_network): - - changed = False - private_network = get_private_network(api, wished_private_network['name']) - if private_network is not None: - if set(wished_private_network['tags']) == set(private_network['tags']): - return changed, private_network - else: - # private network need to be updated - data = {'name': wished_private_network['name'], - 'tags': wished_private_network['tags'] - } - changed = True - if api.module.check_mode: - return changed, {"status": "private network would be updated"} - - response = api.patch(path='private-networks/' + private_network['id'], data=data) - if not response.ok: - api.module.fail_json(msg='Error updating private network [{0}: {1}]'.format(response.status_code, response.json)) - - return changed, response.json - - # private network need to be create - changed = True - if api.module.check_mode: - return changed, {"status": "private network would be created"} - - data = {'name': wished_private_network['name'], - 'project_id': wished_private_network['project'], - 'tags': wished_private_network['tags'] - } - - response = api.post(path='private-networks/', data=data) - - if not response.ok: - api.module.fail_json(msg='Error creating private network [{0}: {1}]'.format(response.status_code, response.json)) - - return changed, response.json - - -def absent_strategy(api, wished_private_network): - - changed = False - private_network = get_private_network(api, wished_private_network['name']) - if private_network is None: - return changed, {} - - changed = True - if api.module.check_mode: - return changed, {"status": "private network would be destroyed"} - - response = api.delete('private-networks/' + private_network['id']) - - if not response.ok: - api.module.fail_json(msg='Error deleting private network [{0}: {1}]'.format( - response.status_code, response.json)) - - return changed, response.json - - -def core(module): - - wished_private_network = { - "project": module.params['project'], - "tags": module.params['tags'], - "name": module.params['name'] - } - - region = module.params["region"] - module.params['api_url'] = 
SCALEWAY_LOCATION[region]["api_endpoint_vpc"] - - api = Scaleway(module=module) - if module.params["state"] == "absent": - changed, summary = absent_strategy(api=api, wished_private_network=wished_private_network) - else: - changed, summary = present_strategy(api=api, wished_private_network=wished_private_network) - module.exit_json(changed=changed, scaleway_private_network=summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - project=dict(required=True), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - tags=dict(type="list", elements="str", default=[]), - name=dict() - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py deleted file mode 100644 index f9faee61..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway Security Group management module -# -# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_security_group -short_description: Scaleway Security Group management module -author: Antoine Barbare (@abarbare) -description: - - This module manages Security Group on Scaleway account - U(https://developer.scaleway.com). -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - description: - - Indicate desired state of the Security Group. - type: str - choices: [ absent, present ] - default: present - - organization: - description: - - Organization identifier. - type: str - required: true - - region: - description: - - Scaleway region to use (for example C(par1)). - type: str - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - name: - description: - - Name of the Security Group. - type: str - required: true - - description: - description: - - Description of the Security Group. - type: str - - stateful: - description: - - Create a stateful security group which allows established connections in and out. - type: bool - required: true - - inbound_default_policy: - description: - - Default policy for incoming traffic. - type: str - choices: [ accept, drop ] - - outbound_default_policy: - description: - - Default policy for outcoming traffic. - type: str - choices: [ accept, drop ] - - organization_default: - description: - - Create security group to be the default one. 
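get_private_network above pages through the list endpoint recursively, ten items at a time, until it finds an exact name match or runs past total_count. An iterative sketch of the same pagination logic; list_page is an assumed helper returning one page of parsed JSON:

def find_by_name(list_page, name, page_size=10):
    # list_page(page, page_size) is assumed to return a dict shaped like
    # the API response: {'total_count': int, 'private_networks': [...]}.
    page = 1
    while True:
        body = list_page(page, page_size)
        for network in body['private_networks']:
            if network['name'] == name:
                return network
        if page * page_size >= body['total_count']:
            return None  # every page inspected, no exact match
        page += 1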
- type: bool -''' - -EXAMPLES = ''' -- name: Create a Security Group - community.general.scaleway_security_group: - state: present - region: par1 - name: security_group - description: "my security group description" - organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9" - stateful: false - inbound_default_policy: accept - outbound_default_policy: accept - organization_default: false - register: security_group_creation_task -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "scaleway_security_group": { - "description": "my security group description", - "enable_default_security": true, - "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae", - "inbound_default_policy": "accept", - "name": "security_group", - "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9", - "organization_default": false, - "outbound_default_policy": "accept", - "servers": [], - "stateful": false - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway -from ansible.module_utils.basic import AnsibleModule -from uuid import uuid4 - - -def payload_from_security_group(security_group): - return dict( - (k, v) - for k, v in security_group.items() - if k != 'id' and v is not None - ) - - -def present_strategy(api, security_group): - ret = {'changed': False} - - response = api.get('security_groups') - if not response.ok: - api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) - - if security_group['name'] not in security_group_lookup.keys(): - ret['changed'] = True - if api.module.check_mode: - # Help user when check mode is enabled by defining id key - ret['scaleway_security_group'] = {'id': str(uuid4())} - return ret - - # Create Security Group - response = api.post('/security_groups', - data=payload_from_security_group(security_group)) - - if not response.ok: - msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json) - api.module.fail_json(msg=msg) - ret['scaleway_security_group'] = response.json['security_group'] - - else: - ret['scaleway_security_group'] = security_group_lookup[security_group['name']] - - return ret - - -def absent_strategy(api, security_group): - response = api.get('security_groups') - ret = {'changed': False} - - if not response.ok: - api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) - if security_group['name'] not in security_group_lookup.keys(): - return ret - - ret['changed'] = True - if api.module.check_mode: - return ret - - response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id']) - if not response.ok: - api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - - return ret - - -def core(module): - security_group = { - 'organization': module.params['organization'], - 'name': module.params['name'], - 'description': module.params['description'], - 'stateful': module.params['stateful'], - 'inbound_default_policy': module.params['inbound_default_policy'], - 
'outbound_default_policy': module.params['outbound_default_policy'], - 'organization_default': module.params['organization_default'], - } - - region = module.params['region'] - module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] - - api = Scaleway(module=module) - if module.params['state'] == 'present': - summary = present_strategy(api=api, security_group=security_group) - else: - summary = absent_strategy(api=api, security_group=security_group) - module.exit_json(**summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - organization=dict(type='str', required=True), - name=dict(type='str', required=True), - description=dict(type='str'), - region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())), - stateful=dict(type='bool', required=True), - inbound_default_policy=dict(type='str', choices=['accept', 'drop']), - outbound_default_policy=dict(type='str', choices=['accept', 'drop']), - organization_default=dict(type='bool'), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]] - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py deleted file mode 100644 index a15044e6..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_security_group_info -short_description: Gather information about the Scaleway security groups available. -description: - - Gather information about the Scaleway security groups available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -extends_documentation_fragment: -- community.general.scaleway - -''' - -EXAMPLES = r''' -- name: Gather Scaleway security groups information - community.general.scaleway_security_group_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_security_group_info }}" -''' - -RETURN = r''' ---- -scaleway_security_group_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
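Rather than hand-writing parameter checks, the security group module above declares them: required_if makes both default policies mandatory exactly when stateful is true. A minimal sketch of that declaration in isolation:

from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(
    argument_spec=dict(
        stateful=dict(type='bool', required=True),
        inbound_default_policy=dict(type='str', choices=['accept', 'drop']),
        outbound_default_policy=dict(type='str', choices=['accept', 'drop']),
    ),
    # When stateful is true, both policies must be supplied; otherwise
    # they may be omitted entirely.
    required_if=[('stateful', True, ('inbound_default_policy', 'outbound_default_policy'))],
)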
- returned: success - type: list - elements: dict - sample: - "scaleway_security_group_info": [ - { - "description": "test-ams", - "enable_default_security": true, - "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", - "name": "test-ams", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "organization_default": false, - "servers": [ - { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } - ] - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewaySecurityGroupInfo(Scaleway): - - def __init__(self, module): - super(ScalewaySecurityGroupInfo, self).__init__(module) - self.name = 'security_groups' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py deleted file mode 100644 index 9f959212..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway Security Group Rule management module -# -# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). -# -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_security_group_rule -short_description: Scaleway Security Group Rule management module -author: Antoine Barbare (@abarbare) -description: - - This module manages Security Group Rule on Scaleway account - U(https://developer.scaleway.com) -extends_documentation_fragment: - - community.general.scaleway -requirements: - - ipaddress - -options: - state: - type: str - description: - - Indicate desired state of the Security Group Rule. - default: present - choices: - - present - - absent - - region: - type: str - description: - - Scaleway region to use (for example C(par1)). 
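One detail worth noting in the security group module earlier: when a creation is skipped under check mode, it still returns a freshly generated UUID as the would-be resource id, so downstream tasks that template the id keep working during a dry run. The trick in isolation:

from uuid import uuid4

def check_mode_creation_result():
    # The throwaway id lets check-mode runs exercise tasks that
    # reference result.scaleway_security_group.id; it does not belong
    # to any real resource and changes on every invocation.
    return {'changed': True, 'scaleway_security_group': {'id': str(uuid4())}}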
- required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - - protocol: - type: str - description: - - Network protocol to use - choices: - - TCP - - UDP - - ICMP - required: true - - port: - description: - - Port related to the rule, null value for all the ports - required: true - type: int - - ip_range: - type: str - description: - - IPV4 CIDR notation to apply to the rule - default: 0.0.0.0/0 - - direction: - type: str - description: - - Rule direction - choices: - - inbound - - outbound - required: true - - action: - type: str - description: - - Rule action - choices: - - accept - - drop - required: true - - security_group: - type: str - description: - - Security Group unique identifier - required: true -''' - -EXAMPLES = ''' - - name: Create a Security Group Rule - community.general.scaleway_security_group_rule: - state: present - region: par1 - protocol: TCP - port: 80 - ip_range: 0.0.0.0/0 - direction: inbound - action: accept - security_group: b57210ee-1281-4820-a6db-329f78596ecb - register: security_group_rule_creation_task -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "scaleway_security_group_rule": { - "direction": "inbound", - "protocol": "TCP", - "ip_range": "0.0.0.0/0", - "dest_port_from": 80, - "action": "accept", - "position": 2, - "dest_port_to": null, - "editable": null, - "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" - } - } -''' - -import traceback - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -try: - from ipaddress import ip_network -except ImportError: - IPADDRESS_IMP_ERR = traceback.format_exc() - HAS_IPADDRESS = False -else: - HAS_IPADDRESS = True - - -def get_sgr_from_api(security_group_rules, security_group_rule): - """ Check if a security_group_rule specs are present in security_group_rules - Return None if no rules match the specs - Return the rule if found - """ - for sgr in security_group_rules: - if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and - sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and - sgr['protocol'] == security_group_rule['protocol']): - return sgr - - return None - - -def present_strategy(api, security_group_id, security_group_rule): - ret = {'changed': False} - - response = api.get('security_groups/%s/rules' % security_group_id) - if not response.ok: - api.module.fail_json( - msg='Error getting security group rules "%s": "%s" (%s)' % - (response.info['msg'], response.json['message'], response.json)) - - existing_rule = get_sgr_from_api( - response.json['rules'], security_group_rule) - - if not existing_rule: - ret['changed'] = True - if api.module.check_mode: - return ret - - # Create Security Group Rule - response = api.post('/security_groups/%s/rules' % security_group_id, - data=payload_from_object(security_group_rule)) - - if not response.ok: - api.module.fail_json( - msg='Error during security group rule creation: "%s": "%s" (%s)' % - (response.info['msg'], response.json['message'], response.json)) - ret['scaleway_security_group_rule'] = response.json['rule'] - - else: - 
ret['scaleway_security_group_rule'] = existing_rule - - return ret - - -def absent_strategy(api, security_group_id, security_group_rule): - ret = {'changed': False} - - response = api.get('security_groups/%s/rules' % security_group_id) - if not response.ok: - api.module.fail_json( - msg='Error getting security group rules "%s": "%s" (%s)' % - (response.info['msg'], response.json['message'], response.json)) - - existing_rule = get_sgr_from_api( - response.json['rules'], security_group_rule) - - if not existing_rule: - return ret - - ret['changed'] = True - if api.module.check_mode: - return ret - - response = api.delete( - '/security_groups/%s/rules/%s' % - (security_group_id, existing_rule['id'])) - if not response.ok: - api.module.fail_json( - msg='Error deleting security group rule "%s": "%s" (%s)' % - (response.info['msg'], response.json['message'], response.json)) - - return ret - - -def core(module): - api = Scaleway(module=module) - - security_group_rule = { - 'protocol': module.params['protocol'], - 'dest_port_from': module.params['port'], - 'ip_range': module.params['ip_range'], - 'direction': module.params['direction'], - 'action': module.params['action'], - } - - region = module.params['region'] - module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] - - if module.params['state'] == 'present': - summary = present_strategy( - api=api, - security_group_id=module.params['security_group'], - security_group_rule=security_group_rule) - else: - summary = absent_strategy( - api=api, - security_group_id=module.params['security_group'], - security_group_rule=security_group_rule) - module.exit_json(**summary) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update( - state=dict(type='str', default='present', choices=['absent', 'present']), - region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())), - protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']), - port=dict(type='int', required=True), - ip_range=dict(type='str', default='0.0.0.0/0'), - direction=dict(type='str', required=True, choices=['inbound', 'outbound']), - action=dict(type='str', required=True, choices=['accept', 'drop']), - security_group=dict(type='str', required=True), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - if not HAS_IPADDRESS: - module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py deleted file mode 100644 index 2b9d91b4..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_server_info -short_description: Gather information about the Scaleway servers available. -description: - - Gather information about the Scaleway servers available. 
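Aside: the security-group and rule modules deleted above share one idempotency shape: fetch the current state, compare it to the requested spec, and mutate only on a difference, short-circuiting in check mode. A condensed sketch of that present/absent strategy pair; the api object with find/create/delete methods is a stand-in for the collection's Scaleway client, and all names here are illustrative:

def present_strategy(api, spec, check_mode=False):
    # No-op when an equivalent object already exists.
    existing = api.find(spec)
    if existing is not None:
        return {'changed': False, 'object': existing}
    if check_mode:
        return {'changed': True}  # would create, but check mode never mutates
    return {'changed': True, 'object': api.create(spec)}


def absent_strategy(api, spec, check_mode=False):
    # No-op when the object is already gone.
    existing = api.find(spec)
    if existing is None:
        return {'changed': False}
    if check_mode:
        return {'changed': True}  # would delete, but check mode never mutates
    api.delete(existing['id'])
    return {'changed': True}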
-author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway servers information - community.general.scaleway_server_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_server_info }}" -''' - -RETURN = r''' ---- -scaleway_server_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." - returned: success - type: list - elements: dict - sample: - "scaleway_server_info": [ - { - "arch": "x86_64", - "boot_type": "local", - "bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "commercial_type": "START1-XS", - "creation_date": "2018-08-14T21:36:56.271545+00:00", - "dynamic_ip_required": false, - "enable_ipv6": false, - "extra_networks": [], - "hostname": "scw-e0d256", - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "image": { - "arch": "x86_64", - "creation_date": "2018-04-26T12:42:21.619844+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", - "modification_date": "2018-04-26T12:49:07.573004+00:00", - "name": "Ubuntu Xenial", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - }, - "ipv6": null, - "location": { - "cluster_id": "5", - "hypervisor_id": "412", - "node_id": "2", - "platform_id": "13", - "zone_id": "par1" - }, - "maintenances": [], - "modification_date": "2018-08-14T21:37:28.630882+00:00", - "name": "scw-e0d256", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "private_ip": "10.14.222.131", - "protected": false, - "public_ip": { - "address": "163.172.170.197", - "dynamic": false, - "id": "ea081794-a581-4495-8451-386ddaf0a451" - }, - "security_group": { - "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", - "name": "Default security group" - }, - "state": "running", - "state_detail": "booted", - "tags": [], - "volumes": { - "0": { - "creation_date": "2018-08-14T21:36:56.271545+00:00", - "export_uri": "device://dev/vda", - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "modification_date": 
"2018-08-14T21:36:56.271545+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d256" - }, - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" - } - } - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewayServerInfo(Scaleway): - - def __init__(self, module): - super(ScalewayServerInfo, self).__init__(module) - self.name = 'servers' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_server_info=ScalewayServerInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py deleted file mode 100644 index 8e1d2a61..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_snapshot_info -short_description: Gather information about the Scaleway snapshots available. -description: - - Gather information about the Scaleway snapshot available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway snapshots information - community.general.scaleway_snapshot_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_snapshot_info }}" -''' - -RETURN = r''' ---- -scaleway_snapshot_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
- returned: success - type: list - elements: dict - sample: - "scaleway_snapshot_info": [ - { - "base_volume": { - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" - }, - "creation_date": "2018-08-14T22:34:35.299461+00:00", - "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", - "modification_date": "2018-08-14T22:34:54.520560+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION -) - - -class ScalewaySnapshotInfo(Scaleway): - - def __init__(self, module): - super(ScalewaySnapshotInfo, self).__init__(module) - self.name = 'snapshots' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py deleted file mode 100644 index 4c559092..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway SSH keys management module -# -# Copyright (C) 2018 Online SAS. -# https://www.scaleway.com -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_sshkey -short_description: Scaleway SSH keys management module -author: Remy Leone (@remyleone) -description: - - This module manages SSH keys on Scaleway account - U(https://developer.scaleway.com) -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - type: str - description: - - Indicate desired state of the SSH key. - default: present - choices: - - present - - absent - ssh_pub_key: - type: str - description: - - The public SSH key as a string to add. - required: true - api_url: - type: str - description: - - Scaleway API URL - default: 'https://account.scaleway.com' - aliases: ['base_url'] -''' - -EXAMPLES = ''' -- name: "Add SSH key" - community.general.scaleway_sshkey: - ssh_pub_key: "ssh-rsa AAAA..." - state: "present" - -- name: "Delete SSH key" - community.general.scaleway_sshkey: - ssh_pub_key: "ssh-rsa AAAA..." - state: "absent" - -- name: "Add SSH key with explicit token" - community.general.scaleway_sshkey: - ssh_pub_key: "ssh-rsa AAAA..." 
- state: "present" - oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c" -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "ssh_public_keys": [ - {"key": "ssh-rsa AAAA...."} - ] - } -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway - - -def extract_present_sshkeys(raw_organization_dict): - ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"] - ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list] - return ssh_key_lookup - - -def extract_user_id(raw_organization_dict): - return raw_organization_dict["organizations"][0]["users"][0]["id"] - - -def sshkey_user_patch(ssh_lookup): - ssh_list = {"ssh_public_keys": [{"key": key} - for key in ssh_lookup]} - return ssh_list - - -def core(module): - ssh_pub_key = module.params['ssh_pub_key'] - state = module.params["state"] - account_api = Scaleway(module) - response = account_api.get('organizations') - - status_code = response.status_code - organization_json = response.json - - if not response.ok: - module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format( - status_code, response.json['message'])) - - user_id = extract_user_id(organization_json) - present_sshkeys = [] - try: - present_sshkeys = extract_present_sshkeys(organization_json) - except (KeyError, IndexError) as e: - module.fail_json(changed=False, data="Error while extracting present SSH keys from API") - - if state in ('present',): - if ssh_pub_key in present_sshkeys: - module.exit_json(changed=False) - - # If key not found create it! - if module.check_mode: - module.exit_json(changed=True) - - present_sshkeys.append(ssh_pub_key) - payload = sshkey_user_patch(present_sshkeys) - - response = account_api.patch('/users/%s' % user_id, data=payload) - - if response.ok: - module.exit_json(changed=True, data=response.json) - - module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format( - response.status_code, response.json)) - - elif state in ('absent',): - if ssh_pub_key not in present_sshkeys: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - present_sshkeys.remove(ssh_pub_key) - payload = sshkey_user_patch(present_sshkeys) - - response = account_api.patch('/users/%s' % user_id, data=payload) - - if response.ok: - module.exit_json(changed=True, data=response.json) - - module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format( - response.status_code, response.json)) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - ssh_pub_key=dict(required=True), - api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py deleted file mode 100644 index 2848ec2c..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway user data 
management module -# -# Copyright (C) 2018 Online SAS. -# https://www.scaleway.com -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_user_data -short_description: Scaleway user_data management module -author: Remy Leone (@remyleone) -description: - - "This module manages user_data on compute instances on Scaleway." - - "It can be used, for instance, to configure cloud-init." -extends_documentation_fragment: -- community.general.scaleway - - -options: - - server_id: - type: str - description: - - Scaleway Compute instance ID of the server. - required: true - - user_data: - type: dict - description: - - User defined data. Typically used with `cloud-init`. - - Pass your cloud-init script here as a string. - required: false - - region: - type: str - description: - - Scaleway compute zone. - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = ''' -- name: Update the cloud-init - community.general.scaleway_user_data: - server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce' - region: ams1 - user_data: - cloud-init: 'final_message: "Hello World!"' -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway - - -def patch_user_data(compute_api, server_id, key, value): - compute_api.module.debug("Starting patching user_data attributes") - - path = "servers/%s/user_data/%s" % (server_id, key) - response = compute_api.patch(path=path, data=value, headers={"Content-Type": "text/plain"}) - if not response.ok: - msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body) - compute_api.module.fail_json(msg=msg) - - return response - - -def delete_user_data(compute_api, server_id, key): - compute_api.module.debug("Starting deleting user_data attributes: %s" % key) - - response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key)) - - if not response.ok: - msg = 'Error during user_data deleting: (%s) %s' % (response.status_code, response.body) - compute_api.module.fail_json(msg=msg) - - return response - - -def get_user_data(compute_api, server_id, key): - compute_api.module.debug("Starting fetching user_data attributes") - - path = "servers/%s/user_data/%s" % (server_id, key) - response = compute_api.get(path=path) - if not response.ok: - msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body) - compute_api.module.fail_json(msg=msg) - - return response.json - - -def core(module): - region = module.params["region"] - server_id = module.params["server_id"] - user_data = module.params["user_data"] - changed = False - - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - compute_api = Scaleway(module=module) - - user_data_list = compute_api.get(path="servers/%s/user_data" % server_id) - if not user_data_list.ok: - msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body) - compute_api.module.fail_json(msg=msg) - - present_user_data_keys = user_data_list.json["user_data"] - present_user_data = dict( - (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key)) - for key in present_user_data_keys - ) - - if present_user_data == user_data: -
module.exit_json(changed=changed, msg=user_data_list.json) - - # First we remove keys that are not defined in the wished user_data - for key in present_user_data: - if key not in user_data: - - changed = True - if compute_api.module.check_mode: - module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id}) - - delete_user_data(compute_api=compute_api, server_id=server_id, key=key) - - # Then we patch keys that are different - for key, value in user_data.items(): - if key not in present_user_data or user_data[key] != present_user_data[key]: - - changed = True - if compute_api.module.check_mode: - module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id}) - - patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value) - - module.exit_json(changed=changed, msg=user_data) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - user_data=dict(type="dict"), - server_id=dict(required=True), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py deleted file mode 100644 index e68309fc..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Scaleway volumes management module -# -# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com). -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: scaleway_volume -short_description: Scaleway volumes management module -author: Henryk Konsek (@hekonsek) -description: - - This module manages volumes on Scaleway account - U(https://developer.scaleway.com) -extends_documentation_fragment: -- community.general.scaleway - - -options: - state: - type: str - description: - - Indicate desired state of the volume. - default: present - choices: - - present - - absent - region: - type: str - description: - - Scaleway region to use (for example par1). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 - name: - type: str - description: - - Name used to identify the volume. - required: true - project: - type: str - description: - - Scaleway project ID to which volume belongs. - version_added: 4.3.0 - organization: - type: str - description: - - ScaleWay organization ID to which volume belongs. - size: - type: int - description: - - Size of the volume in bytes. - volume_type: - type: str - description: - - Type of the volume (for example 'l_ssd'). 
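Aside: the user_data handling in scaleway_user_data above boils down to a generic two-pass dictionary reconciliation: delete present keys that are no longer desired, then (re)write keys whose values differ. A dependency-free sketch of that logic; reconcile, delete_key and write_key are illustrative stand-ins for the module's API calls:

def reconcile(present, desired, delete_key, write_key):
    """Bring `present` in line with `desired`; return True if anything changed."""
    changed = False
    for key in present:
        if key not in desired:
            delete_key(key)  # drop keys the caller no longer wants
            changed = True
    for key, value in desired.items():
        if present.get(key) != value:
            write_key(key, value)  # create missing keys, update differing ones
            changed = True
    return changed


ops = []
reconcile({'a': 1, 'b': 2}, {'b': 3, 'c': 4},
          delete_key=lambda k: ops.append(('delete', k)),
          write_key=lambda k, v: ops.append(('write', k, v)))
# ops is now [('delete', 'a'), ('write', 'b', 3), ('write', 'c', 4)]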
-''' - -EXAMPLES = ''' -- name: Create 10GB volume - community.general.scaleway_volume: - name: my-volume - state: present - region: par1 - project: "{{ scw_org }}" - "size": 10000000000 - volume_type: l_ssd - register: server_creation_check_task - -- name: Make sure volume deleted - community.general.scaleway_volume: - name: my-volume - state: absent - region: par1 -''' - -RETURN = ''' -data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "volume": { - "export_uri": null, - "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd", - "name": "volume-0-3", - "project": "000a115d-2852-4b0a-9ce8-47f1134ba95a", - "server": null, - "size": 10000000000, - "volume_type": "l_ssd" - } -} -''' - -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway -from ansible.module_utils.basic import AnsibleModule - - -def core(module): - region = module.params["region"] - state = module.params['state'] - name = module.params['name'] - organization = module.params['organization'] - project = module.params['project'] - size = module.params['size'] - volume_type = module.params['volume_type'] - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - account_api = Scaleway(module) - response = account_api.get('volumes') - status_code = response.status_code - volumes_json = response.json - - if project is None: - project = organization - - if not response.ok: - module.fail_json(msg='Error getting volume [{0}: {1}]'.format( - status_code, response.json['message'])) - - volumeByName = None - for volume in volumes_json['volumes']: - if volume['project'] == project and volume['name'] == name: - volumeByName = volume - - if state in ('present',): - if volumeByName is not None: - module.exit_json(changed=False) - - payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type} - - response = account_api.post('/volumes', payload) - - if response.ok: - module.exit_json(changed=True, data=response.json) - - module.fail_json(msg='Error creating volume [{0}: {1}]'.format( - response.status_code, response.json)) - - elif state in ('absent',): - if volumeByName is None: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - response = account_api.delete('/volumes/' + volumeByName['id']) - if response.status_code == 204: - module.exit_json(changed=True, data=response.json) - - module.fail_json(msg='Error deleting volume [{0}: {1}]'.format( - response.status_code, response.json)) - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - name=dict(required=True), - size=dict(type='int'), - project=dict(), - organization=dict(), - volume_type=dict(), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ('organization', 'project'), - ], - required_one_of=[ - ('organization', 'project'), - ], - ) - - core(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py deleted file mode 100644 index e8dfa414..00000000 --- 
a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_volume_info -short_description: Gather information about the Scaleway volumes available. -description: - - Gather information about the Scaleway volumes available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway volumes information - community.general.scaleway_volume_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_volume_info }}" -''' - -RETURN = r''' ---- -scaleway_volume_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." - returned: success - type: list - elements: dict - sample: - "scaleway_volume_info": [ - { - "creation_date": "2018-08-14T20:56:24.949660+00:00", - "export_uri": null, - "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", - "modification_date": "2018-08-14T20:56:24.949660+00:00", - "name": "test-volume", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": null, - "size": 50000000000, - "state": "available", - "volume_type": "l_ssd" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, - SCALEWAY_LOCATION) - - -class ScalewayVolumeInfo(Scaleway): - - def __init__(self, module): - super(ScalewayVolumeInfo, self).__init__(module) - self.name = 'volumes' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_volume_info=ScalewayVolumeInfo(module).get_resources() - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py b/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py deleted file mode 100644 index 18a67d01..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, 2017 Jasper Lievisse Adriaanse -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: imgadm -short_description: Manage SmartOS images -description: - - Manage SmartOS virtual machine images through imgadm(1M) 
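Aside: scaleway_volume above accepts either the legacy organization ID or the newer project ID, never both — mutually_exclusive plus required_one_of enforce exactly one, after which the module folds organization into project. A standalone sketch of that exactly-one-of rule (pick_project is an illustrative name):

def pick_project(organization=None, project=None):
    # required_one_of + mutually_exclusive for the pair means:
    # exactly one of the two may be set.
    if (organization is None) == (project is None):
        raise ValueError('exactly one of organization/project is required')
    # The legacy organization ID doubles as the project ID.
    return project if project is not None else organization


assert pick_project(organization='3f709602-5e6c-4619-b80c-e841c89734af') == \
    '3f709602-5e6c-4619-b80c-e841c89734af'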
-author: Jasper Lievisse Adriaanse (@jasperla) -options: - force: - required: false - type: bool - description: - - Force a given operation (where supported by imgadm(1M)). - pool: - required: false - default: zones - description: - - zpool to import to or delete images from. - type: str - source: - required: false - description: - - URI for the image source. - type: str - state: - required: true - choices: [ present, absent, deleted, imported, updated, vacuumed ] - description: - - State the object operated on should be in. C(imported) is an alias for - for C(present) and C(deleted) for C(absent). When set to C(vacuumed) - and C(uuid) to C(*), it will remove all unused images. - type: str - - type: - required: false - choices: [ imgapi, docker, dsapi ] - default: imgapi - description: - - Type for image sources. - type: str - - uuid: - required: false - description: - - Image UUID. Can either be a full UUID or C(*) for all images. - type: str - -requirements: - - python >= 2.6 -''' - -EXAMPLES = ''' -- name: Import an image - community.general.imgadm: - uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' - state: imported - -- name: Delete an image - community.general.imgadm: - uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' - state: deleted - -- name: Update all images - community.general.imgadm: - uuid: '*' - state: updated - -- name: Update a single image - community.general.imgadm: - uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' - state: updated - -- name: Add a source - community.general.imgadm: - source: 'https://datasets.project-fifo.net' - state: present - -- name: Add a Docker source - community.general.imgadm: - source: 'https://docker.io' - type: docker - state: present - -- name: Remove a source - community.general.imgadm: - source: 'https://docker.io' - state: absent -''' - -RETURN = ''' -source: - description: Source that is managed. - returned: When not managing an image. - type: str - sample: https://datasets.project-fifo.net -uuid: - description: UUID for an image operated on. - returned: When not managing an image source. - type: str - sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 -state: - description: State of the target, after execution. - returned: success - type: str - sample: 'present' -''' - -import re - -from ansible.module_utils.basic import AnsibleModule - -# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a -# -E option to return any errors in JSON, the generated JSON does not play well -# with the JSON parsers of Python. The returned message contains '\n' as part of -# the stacktrace, which breaks the parsers. - - -class Imgadm(object): - def __init__(self, module): - self.module = module - self.params = module.params - self.cmd = module.get_bin_path('imgadm', required=True) - self.changed = False - self.uuid = module.params['uuid'] - - # Since there are a number of (natural) aliases, prevent having to look - # them up everytime we operate on `state`. - if self.params['state'] in ['present', 'imported', 'updated']: - self.present = True - else: - self.present = False - - # Perform basic UUID validation upfront. 
- if self.uuid and self.uuid != '*': - if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE): - module.fail_json(msg='Provided value for uuid option is not a valid UUID.') - - # Helper method to massage stderr - def errmsg(self, stderr): - match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr) - if match: - return match.groups()[0] - else: - return 'Unexpected failure' - - def update_images(self): - if self.uuid == '*': - cmd = '{0} update'.format(self.cmd) - else: - cmd = '{0} update {1}'.format(self.cmd, self.uuid) - - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr))) - - # There is no feedback from imgadm(1M) to determine if anything - # was actually changed. So treat this as an 'always-changes' operation. - # Note that 'imgadm -v' produces unparseable JSON... - self.changed = True - - def manage_sources(self): - force = self.params['force'] - source = self.params['source'] - imgtype = self.params['type'] - - cmd = '{0} sources'.format(self.cmd) - - if force: - cmd += ' -f' - - if self.present: - cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype) - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr))) - - # Check the various responses. - # Note that trying to add a source with the wrong type is handled - # above as it results in a non-zero status. - - regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source) - if re.match(regex, stdout): - self.changed = False - - regex = 'Added "%s" image source "%s"' % (imgtype, source) - if re.match(regex, stdout): - self.changed = True - else: - # Type is ignored by imgadm(1M) here - cmd += ' -d %s' % source - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr))) - - regex = 'Do not have image source "%s", no change' % source - if re.match(regex, stdout): - self.changed = False - - regex = 'Deleted ".*" image source "%s"' % source - if re.match(regex, stdout): - self.changed = True - - def manage_images(self): - pool = self.params['pool'] - state = self.params['state'] - - if state == 'vacuumed': - # Unconditionally pass '--force', otherwise we're prompted with 'y/N' - cmd = '{0} vacuum -f'.format(self.cmd) - - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr))) - else: - if stdout == '': - self.changed = False - else: - self.changed = True - if self.present: - cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid) - - (rc, stdout, stderr) = self.module.run_command(cmd) - - if rc != 0: - self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr))) - - regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid) - if re.match(regex, stdout): - self.changed = False - - regex = '.*ActiveImageNotFound.*' - if re.match(regex, stderr): - self.changed = False - - regex = 'Imported image {0}.*'.format(self.uuid) - if re.match(regex, stdout.splitlines()[-1]): - self.changed = True - else: - cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid) - - (rc, stdout, stderr) = self.module.run_command(cmd) - - regex = '.*ImageNotInstalled.*' - if re.match(regex, stderr): - # Even if the 'rc' was non-zero (3), we handled the 
situation - # in order to determine if there was a change. - self.changed = False - - regex = 'Deleted image {0}'.format(self.uuid) - if re.match(regex, stdout): - self.changed = True - - -def main(): - module = AnsibleModule( - argument_spec=dict( - force=dict(type='bool'), - pool=dict(default='zones'), - source=dict(), - state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']), - type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']), - uuid=dict() - ), - # This module relies largely on imgadm(1M) to enforce idempotency, which does not - # provide a "noop" (or equivalent) mode to do a dry-run. - supports_check_mode=False, - ) - - imgadm = Imgadm(module) - - uuid = module.params['uuid'] - source = module.params['source'] - state = module.params['state'] - - result = {'state': state} - - # Either manage sources or images. - if source: - result['source'] = source - imgadm.manage_sources() - else: - result['uuid'] = uuid - - if state == 'updated': - imgadm.update_images() - else: - # Make sure operate on a single image for the following actions - if (uuid == '*') and (state != 'vacuumed'): - module.fail_json(msg='Can only specify uuid as "*" when updating image(s)') - imgadm.manage_images() - - result['changed'] = imgadm.changed - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py b/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py deleted file mode 100644 index 05aba6f1..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Bruce Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: nictagadm -short_description: Manage nic tags on SmartOS systems -description: - - Create or delete nic tags on SmartOS systems. -author: -- Bruce Smith (@SmithX10) -options: - name: - description: - - Name of the nic tag. - required: true - type: str - mac: - description: - - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub). - - Parameters I(mac) and I(etherstub) are mutually exclusive. - type: str - etherstub: - description: - - Specifies that the nic tag will be attached to a created I(etherstub). - - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac). - type: bool - default: no - mtu: - description: - - Specifies the size of the I(mtu) of the desired nic tag. - - Parameters I(mtu) and I(etherstub) are mutually exclusive. - type: int - force: - description: - - When I(state) is absent set this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs. - type: bool - default: no - state: - description: - - Create or delete a SmartOS nic tag. 
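Aside: because imgadm(1M) reports nothing structured about whether an operation changed anything, the imgadm module above infers changed by pattern-matching stdout ('Already have ... no change' versus 'Added ...'). A small sketch of that detection step; detect_source_change and the sample output are illustrative:

import re


def detect_source_change(stdout, imgtype, source):
    # 'no change' lines mean changed=False, 'Added' lines mean changed=True.
    if re.match('Already have "{0}" image source "{1}", no change'.format(imgtype, source), stdout):
        return False
    if re.match('Added "{0}" image source "{1}"'.format(imgtype, source), stdout):
        return True
    return None  # unrecognized output; the caller decides how to treat it


assert detect_source_change(
    'Added "imgapi" image source "https://datasets.project-fifo.net"',
    'imgapi', 'https://datasets.project-fifo.net') is True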
- type: str - choices: [ absent, present ] - default: present -''' - -EXAMPLES = r''' -- name: Create 'storage0' on '00:1b:21:a3:f5:4d' - community.general.nictagadm: - name: storage0 - mac: 00:1b:21:a3:f5:4d - mtu: 9000 - state: present - -- name: Remove 'storage0' nic tag - community.general.nictagadm: - name: storage0 - state: absent -''' - -RETURN = r''' -name: - description: nic tag name - returned: always - type: str - sample: storage0 -mac: - description: MAC Address that the nic tag was attached to. - returned: always - type: str - sample: 00:1b:21:a3:f5:4d -etherstub: - description: specifies if the nic tag will create and attach to an etherstub. - returned: always - type: bool - sample: False -mtu: - description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive. - returned: always - type: int - sample: 1500 -force: - description: Shows if -f was used during the deletion of a nic tag - returned: always - type: bool - sample: False -state: - description: state of the target - returned: always - type: str - sample: present -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.network import is_mac - - -class NicTag(object): - - def __init__(self, module): - self.module = module - - self.name = module.params['name'] - self.mac = module.params['mac'] - self.etherstub = module.params['etherstub'] - self.mtu = module.params['mtu'] - self.force = module.params['force'] - self.state = module.params['state'] - - self.nictagadm_bin = self.module.get_bin_path('nictagadm', True) - - def is_valid_mac(self): - return is_mac(self.mac.lower()) - - def nictag_exists(self): - cmd = [self.nictagadm_bin, 'exists', self.name] - (rc, dummy, dummy) = self.module.run_command(cmd) - - return rc == 0 - - def add_nictag(self): - cmd = [self.nictagadm_bin, '-v', 'add'] - - if self.etherstub: - cmd.append('-l') - - if self.mtu: - cmd.append('-p') - cmd.append('mtu=' + str(self.mtu)) - - if self.mac: - cmd.append('-p') - cmd.append('mac=' + str(self.mac)) - - cmd.append(self.name) - - return self.module.run_command(cmd) - - def delete_nictag(self): - cmd = [self.nictagadm_bin, '-v', 'delete'] - - if self.force: - cmd.append('-f') - - cmd.append(self.name) - - return self.module.run_command(cmd) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - mac=dict(type='str'), - etherstub=dict(type='bool', default=False), - mtu=dict(type='int'), - force=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - mutually_exclusive=[ - ['etherstub', 'mac'], - ['etherstub', 'mtu'], - ], - required_if=[ - ['etherstub', False, ['name', 'mac']], - ['state', 'absent', ['name', 'force']], - ], - supports_check_mode=True - ) - - nictag = NicTag(module) - - rc = None - out = '' - err = '' - result = dict( - changed=False, - etherstub=nictag.etherstub, - force=nictag.force, - name=nictag.name, - mac=nictag.mac, - mtu=nictag.mtu, - state=nictag.state, - ) - - if not nictag.is_valid_mac(): - module.fail_json(msg='Invalid MAC Address Value', - name=nictag.name, - mac=nictag.mac, - etherstub=nictag.etherstub) - - if nictag.state == 'absent': - if nictag.nictag_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = nictag.delete_nictag() - if rc != 0: - module.fail_json(name=nictag.name, msg=err, rc=rc) - elif nictag.state == 'present': - if not nictag.nictag_exists(): - if module.check_mode: 
- module.exit_json(changed=True) - (rc, out, err) = nictag.add_nictag() - if rc is not None and rc != 0: - module.fail_json(name=nictag.name, msg=err, rc=rc) - - if rc is not None: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py b/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py deleted file mode 100644 index 369559f5..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: smartos_image_info -short_description: Get SmartOS image details. -description: - - Retrieve information about all installed images on SmartOS. - - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)! -author: Adam Števko (@xen0l) -options: - filters: - description: - - Criteria for selecting image. Can be any value from image - manifest and 'published_date', 'published', 'source', 'clones', - and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm) - under 'imgadm list'. - type: str -''' - -EXAMPLES = ''' -- name: Return information about all installed images - community.general.smartos_image_info: - register: result - -- name: Return all private active Linux images - community.general.smartos_image_info: - filters: "os=linux state=active public=false" - register: result - -- name: Show, how many clones does every image have - community.general.smartos_image_info: - register: result - -- name: Print information - ansible.builtin.debug: - msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} - has {{ result.smartos_images[item]['clones'] }} VM(s)" - with_items: "{{ result.smartos_images.keys() | list }}" - -- name: Print information - ansible.builtin.debug: - msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} - has {{ smartos_images[item]['clones'] }} VM(s)" - with_items: "{{ smartos_images.keys() | list }}" -''' - -RETURN = ''' -''' - -import json -from ansible.module_utils.basic import AnsibleModule - - -class ImageFacts(object): - - def __init__(self, module): - self.module = module - - self.filters = module.params['filters'] - - def return_all_installed_images(self): - cmd = [self.module.get_bin_path('imgadm'), 'list', '-j'] - - if self.filters: - cmd.append(self.filters) - - (rc, out, err) = self.module.run_command(cmd) - - if rc != 0: - self.module.exit_json( - msg='Failed to get all installed images', stderr=err) - - images = json.loads(out) - - result = {} - for image in images: - result[image['manifest']['uuid']] = image['manifest'] - # Merge additional attributes with the image manifest. 
- for attrib in ['clones', 'source', 'zpool']: - result[image['manifest']['uuid']][attrib] = image[attrib] - - return result - - -def main(): - module = AnsibleModule( - argument_spec=dict( - filters=dict(default=None), - ), - supports_check_mode=True, - ) - - image_facts = ImageFacts(module) - - data = dict(smartos_images=image_facts.return_all_installed_images()) - - module.exit_json(**data) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py b/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py deleted file mode 100644 index 03a02242..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py +++ /dev/null @@ -1,803 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Jasper Lievisse Adriaanse -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vmadm -short_description: Manage SmartOS virtual machines and zones. -description: - - Manage SmartOS virtual machines through vmadm(1M). -author: Jasper Lievisse Adriaanse (@jasperla) -options: - archive_on_delete: - required: false - description: - - When enabled, the zone dataset will be mounted on C(/zones/archive) - upon removal. - type: bool - autoboot: - required: false - description: - - Whether or not a VM is booted when the system is rebooted. - type: bool - brand: - choices: [ joyent, joyent-minimal, lx, kvm, bhyve ] - default: joyent - description: - - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0. - type: str - boot: - required: false - description: - - Set the boot order for KVM VMs. - type: str - cpu_cap: - required: false - description: - - Sets a limit on the amount of CPU time that can be used by a VM. - Use C(0) for no cap. - type: int - cpu_shares: - required: false - description: - - Sets a limit on the number of fair share scheduler (FSS) CPU shares for - a VM. This limit is relative to all other VMs on the system. - type: int - cpu_type: - required: false - choices: [ qemu64, host ] - default: qemu64 - description: - - Control the type of virtual CPU exposed to KVM VMs. - type: str - customer_metadata: - required: false - description: - - Metadata to be set and associated with this VM, this contain customer - modifiable keys. - type: dict - delegate_dataset: - required: false - description: - - Whether to delegate a ZFS dataset to an OS VM. - type: bool - disk_driver: - required: false - description: - - Default value for a virtual disk model for KVM guests. - type: str - disks: - required: false - description: - - A list of disks to add, valid properties are documented in vmadm(1M). - type: list - elements: dict - dns_domain: - required: false - description: - - Domain value for C(/etc/hosts). - type: str - docker: - required: false - description: - - Docker images need this flag enabled along with the I(brand) set to C(lx). - type: bool - filesystems: - required: false - description: - - Mount additional filesystems into an OS VM. - type: list - elements: dict - firewall_enabled: - required: false - description: - - Enables the firewall, allowing fwadm(1M) rules to be applied. - type: bool - force: - required: false - description: - - Force a particular action (i.e. stop or delete a VM). 
- type: bool - fs_allowed: - required: false - description: - - Comma separated list of filesystem types this zone is allowed to mount. - type: str - hostname: - required: false - description: - - Zone/VM hostname. - type: str - image_uuid: - required: false - description: - - Image UUID. - type: str - indestructible_delegated: - required: false - description: - - Adds an C(@indestructible) snapshot to delegated datasets. - type: bool - indestructible_zoneroot: - required: false - description: - - Adds an C(@indestructible) snapshot to zoneroot. - type: bool - internal_metadata: - required: false - description: - - Metadata to be set and associated with this VM, this contains operator - generated keys. - type: dict - internal_metadata_namespace: - required: false - description: - - List of namespaces to be set as I(internal_metadata-only); these namespaces - will come from I(internal_metadata) rather than I(customer_metadata). - type: str - kernel_version: - required: false - description: - - Kernel version to emulate for LX VMs. - type: str - limit_priv: - required: false - description: - - Set (comma separated) list of privileges the zone is allowed to use. - type: str - maintain_resolvers: - required: false - description: - - Resolvers in C(/etc/resolv.conf) will be updated when updating - the I(resolvers) property. - type: bool - max_locked_memory: - required: false - description: - - Total amount of memory (in MiBs) on the host that can be locked by this VM. - type: int - max_lwps: - required: false - description: - - Maximum number of lightweight processes this VM is allowed to have running. - type: int - max_physical_memory: - required: false - description: - - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use. - type: int - max_swap: - required: false - description: - - Maximum amount of virtual memory (in MiBs) the VM is allowed to use. - type: int - mdata_exec_timeout: - required: false - description: - - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service - that runs user-scripts in the zone. - type: int - name: - required: false - aliases: [ alias ] - description: - - Name of the VM. vmadm(1M) uses this as an optional name. - type: str - nic_driver: - required: false - description: - - Default value for a virtual NIC model for KVM guests. - type: str - nics: - required: false - description: - - A list of nics to add, valid properties are documented in vmadm(1M). - type: list - elements: dict - nowait: - required: false - description: - - Consider the provisioning complete when the VM first starts, rather than - when the VM has rebooted. - type: bool - qemu_opts: - required: false - description: - - Additional qemu arguments for KVM guests. This overwrites the default arguments - provided by vmadm(1M) and should only be used for debugging. - type: str - qemu_extra_opts: - required: false - description: - - Additional qemu cmdline arguments for KVM guests. - type: str - quota: - required: false - description: - - Quota on zone filesystems (in MiBs). - type: int - ram: - required: false - description: - - Amount of virtual RAM for a KVM guest (in MiBs). - type: int - resolvers: - required: false - description: - - List of resolvers to be put into C(/etc/resolv.conf). - type: list - elements: str - routes: - required: false - description: - - Dictionary that maps destinations to gateways, these will be set as static - routes in the VM. 
- type: dict - spice_opts: - required: false - description: - - Addition options for SPICE-enabled KVM VMs. - type: str - spice_password: - required: false - description: - - Password required to connect to SPICE. By default no password is set. - Please note this can be read from the Global Zone. - type: str - state: - choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ] - default: running - description: - - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted) - operate on a VM that is currently provisioned. C(present) means that the VM will be - created if it was absent, and that it will be in a running state. C(absent) will - shutdown the zone before removing it. - C(stopped) means the zone will be created if it doesn't exist already, before shutting - it down. - type: str - tmpfs: - required: false - description: - - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem. - type: int - uuid: - required: false - description: - - UUID of the VM. Can either be a full UUID or C(*) for all VMs. - type: str - vcpus: - required: false - description: - - Number of virtual CPUs for a KVM guest. - type: int - vga: - required: false - description: - - Specify VGA emulation used by KVM VMs. - type: str - virtio_txburst: - required: false - description: - - Number of packets that can be sent in a single flush of the tx queue of virtio NICs. - type: int - virtio_txtimer: - required: false - description: - - Timeout (in nanoseconds) for the TX timer of virtio NICs. - type: int - vnc_password: - required: false - description: - - Password required to connect to VNC. By default no password is set. - Please note this can be read from the Global Zone. - type: str - vnc_port: - required: false - description: - - TCP port to listen of the VNC server. Or set C(0) for random, - or C(-1) to disable. - type: int - zfs_data_compression: - required: false - description: - - Specifies compression algorithm used for this VMs data dataset. This option - only has effect on delegated datasets. - type: str - zfs_data_recsize: - required: false - description: - - Suggested block size (power of 2) for files in the delegated dataset's filesystem. - type: int - zfs_filesystem_limit: - required: false - description: - - Maximum number of filesystems the VM can have. - type: int - zfs_io_priority: - required: false - description: - - IO throttle priority value relative to other VMs. - type: int - zfs_root_compression: - required: false - description: - - Specifies compression algorithm used for this VMs root dataset. This option - only has effect on the zoneroot dataset. - type: str - zfs_root_recsize: - required: false - description: - - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem. - type: int - zfs_snapshot_limit: - required: false - description: - - Number of snapshots the VM can have. - type: int - zpool: - required: false - description: - - ZFS pool the VM's zone dataset will be created in. 
- type: str -requirements: - - python >= 2.6 -''' - -EXAMPLES = ''' -- name: Create SmartOS zone - community.general.vmadm: - brand: joyent - state: present - alias: fw_zone - image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5 - firewall_enabled: yes - indestructible_zoneroot: yes - nics: - - nic_tag: admin - ip: dhcp - primary: true - internal_metadata: - root_pw: 'secret' - quota: 1 - -- name: Delete a zone - community.general.vmadm: - alias: test_zone - state: deleted - -- name: Stop all zones - community.general.vmadm: - uuid: '*' - state: stopped -''' - -RETURN = ''' -uuid: - description: UUID of the managed VM. - returned: always - type: str - sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33' -alias: - description: Alias of the managed VM. - returned: When addressing a VM by alias. - type: str - sample: 'dns-zone' -state: - description: State of the target, after execution. - returned: success - type: str - sample: 'running' -''' - -import json -import os -import re -import tempfile -import traceback - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - -# While vmadm(1M) supports a -E option to return any errors in JSON, the -# generated JSON does not play well with the JSON parsers of Python. -# The returned message contains '\n' as part of the stacktrace, -# which breaks the parsers. - - -def get_vm_prop(module, uuid, prop): - # Lookup a property for the given VM. - # Returns the property, or None if not found. - cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid) - - (rc, stdout, stderr) = module.run_command(cmd) - - if rc != 0: - module.fail_json( - msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr) - - try: - stdout_json = json.loads(stdout) - except Exception as e: - module.fail_json( - msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop), - details=to_native(e), exception=traceback.format_exc()) - - if len(stdout_json) > 0 and prop in stdout_json[0]: - return stdout_json[0][prop] - else: - return None - - -def get_vm_uuid(module, alias): - # Lookup the uuid that goes with the given alias. - # Returns the uuid or '' if not found. - cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias) - - (rc, stdout, stderr) = module.run_command(cmd) - - if rc != 0: - module.fail_json( - msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr) - - # If no VM was found matching the given alias, we get back an empty array. - # That is not an error condition as we might be explicitly checking it's - # absence. - if stdout.strip() == '[]': - return None - else: - try: - stdout_json = json.loads(stdout) - except Exception as e: - module.fail_json( - msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias), - details=to_native(e), exception=traceback.format_exc()) - - if len(stdout_json) > 0 and 'uuid' in stdout_json[0]: - return stdout_json[0]['uuid'] - - -def get_all_vm_uuids(module): - # Retrieve the UUIDs for all VMs. 
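# A minimal sketch, not part of the module, of the lookup pattern described
# above: run `vmadm lookup -j`, parse the JSON array it prints, and treat an
# empty array as "no match" rather than an error. The helper name and the use
# of subprocess are assumptions made for illustration.
import json
import subprocess

def lookup_vm_field(field, **filters):
    """Return the requested field of the first matching VM, or None."""
    cmd = ['vmadm', 'lookup', '-j', '-o', field]
    cmd += ['{0}={1}'.format(k, v) for k, v in filters.items()]
    proc = subprocess.run(cmd, capture_output=True, text=True, check=True)
    results = json.loads(proc.stdout)  # vmadm prints '[]' when nothing matches
    return results[0].get(field) if results else None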
- cmd = '{0} lookup -j -o uuid'.format(module.vmadm) - - (rc, stdout, stderr) = module.run_command(cmd) - - if rc != 0: - module.fail_json(msg='Failed to get VMs list', exception=stderr) - - try: - stdout_json = json.loads(stdout) - return [v['uuid'] for v in stdout_json] - except Exception as e: - module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e), - exception=traceback.format_exc()) - - -def new_vm(module, uuid, vm_state): - payload_file = create_payload(module, uuid) - - (rc, stdout, stderr) = vmadm_create_vm(module, payload_file) - - if rc != 0: - changed = False - module.fail_json(msg='Could not create VM', exception=stderr) - else: - changed = True - # 'vmadm create' returns all output to stderr... - match = re.match('Successfully created VM (.*)', stderr) - if match: - vm_uuid = match.groups()[0] - if not is_valid_uuid(vm_uuid): - module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid)) - else: - module.fail_json(msg='Could not retrieve UUID of newly created(?) VM') - - # Now that the VM is created, ensure it is in the desired state (if not 'running') - if vm_state != 'running': - ret = set_vm_state(module, vm_uuid, vm_state) - if not ret: - module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state)) - - try: - os.unlink(payload_file) - except Exception as e: - # Since the payload may contain sensitive information, fail hard - # if we cannot remove the file so the operator knows about it. - module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)), - exception=traceback.format_exc()) - - return changed, vm_uuid - - -def vmadm_create_vm(module, payload_file): - # Create a new VM using the provided payload. - cmd = '{0} create -f {1}'.format(module.vmadm, payload_file) - - return module.run_command(cmd) - - -def set_vm_state(module, vm_uuid, vm_state): - p = module.params - - # Check if the VM is already in the desired state. - state = get_vm_prop(module, vm_uuid, 'state') - if state and (state == vm_state): - return None - - # Lookup table for the state to be in, and which command to use for that. - # vm_state: [vmadm commandm, forceable?] - cmds = { - 'stopped': ['stop', True], - 'running': ['start', False], - 'deleted': ['delete', True], - 'rebooted': ['reboot', False] - } - - if p['force'] and cmds[vm_state][1]: - force = '-F' - else: - force = '' - - cmd = 'vmadm {0} {1} {2}'.format(cmds[vm_state][0], force, vm_uuid) - - (rc, stdout, stderr) = module.run_command(cmd) - - match = re.match('^Successfully.*', stderr) - if match: - return True - else: - return False - - -def create_payload(module, uuid): - # Create the JSON payload (vmdef) and return the filename. - - # Filter out the few options that are not valid VM properties. - module_options = ['debug', 'force', 'state'] - # @TODO make this a simple {} comprehension as soon as py2 is ditched - # @TODO {k: v for k, v in p.items() if k not in module_options} - vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v]) - - try: - vmdef_json = json.dumps(vmdef) - except Exception as e: - module.fail_json( - msg='Could not create valid JSON payload', exception=traceback.format_exc()) - - # Create the temporary file that contains our payload, and set tight - # permissions for it may container sensitive information. 
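# A small sketch, illustrative only, of the stderr scraping done in new_vm()
# above: 'vmadm create' announces success on stderr, so the fresh UUID is
# pulled out with a regex and sanity-checked before use. The function name
# here is hypothetical.
import re

_UUID_RE = re.compile(r'^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', re.IGNORECASE)

def uuid_from_create_output(stderr):
    """Return the UUID reported by 'vmadm create', or None if missing/invalid."""
    match = re.match(r'Successfully created VM (.*)', stderr)
    if match and _UUID_RE.match(match.group(1)):
        return match.group(1)
    return None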
- try: - # XXX: When there's a way to get the current ansible temporary directory - # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain - # the payload (thus removing the `save_payload` option). - fname = tempfile.mkstemp()[1] - os.chmod(fname, 0o400) - with open(fname, 'w') as fh: - fh.write(vmdef_json) - except Exception as e: - module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc()) - - return fname - - -def vm_state_transition(module, uuid, vm_state): - ret = set_vm_state(module, uuid, vm_state) - - # Whether the VM changed state. - if ret is None: - return False - elif ret: - return True - else: - module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state)) - - -def is_valid_uuid(uuid): - if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE): - return True - else: - return False - - -def validate_uuids(module): - # Perform basic UUID validation. - failed = [] - - for u in [['uuid', module.params['uuid']], - ['image_uuid', module.params['image_uuid']]]: - if u[1] and u[1] != '*': - if not is_valid_uuid(u[1]): - failed.append(u[0]) - - if len(failed) > 0: - module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed))) - - -def manage_all_vms(module, vm_state): - # Handle operations for all VMs, which can by definition only - # be state transitions. - state = module.params['state'] - - if state == 'created': - module.fail_json(msg='State "created" is only valid for tasks with a single VM') - - # If any of the VMs has a change, the task as a whole has a change. - any_changed = False - - # First get all VM uuids and for each check their state, and adjust it if needed. - for uuid in get_all_vm_uuids(module): - current_vm_state = get_vm_prop(module, uuid, 'state') - if not current_vm_state and vm_state == 'deleted': - any_changed = False - else: - if module.check_mode: - if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state): - any_changed = True - else: - any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed) - - return any_changed - - -def main(): - # In order to reduce the clutter and boilerplate for trivial options, - # abstract the vmadm properties and build the dict of arguments later. - # Dict of all options that are simple to define based on their type. - # They're not required and have a default of None. - properties = { - 'str': [ - 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname', - 'image_uuid', 'internal_metadata_namespace', 'kernel_version', - 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts', - 'spice_opts', 'uuid', 'vga', 'zfs_data_compression', - 'zfs_root_compression', 'zpool' - ], - 'bool': [ - 'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset', - 'docker', 'firewall_enabled', 'force', 'indestructible_delegated', - 'indestructible_zoneroot', 'maintain_resolvers', 'nowait' - ], - 'int': [ - 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps', - 'max_physical_memory', 'max_swap', 'mdata_exec_timeout', - 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst', - 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize', - 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize', - 'zfs_snapshot_limit' - ], - 'dict': ['customer_metadata', 'internal_metadata', 'routes'], - } - - # Start with the options that are not as trivial as those above. 
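# A minimal sketch, not part of the module, of the payload handling described
# in create_payload() above: the JSON payload may carry secrets, so it is
# written to a file only the owner can read, and a failed cleanup is surfaced
# rather than ignored. write_payload is a hypothetical name.
import json
import os
import tempfile

def write_payload(vmdef):
    """Serialize vmdef to an owner-readable temp file and return its path."""
    fd, path = tempfile.mkstemp()
    try:
        os.chmod(path, 0o400)
        with os.fdopen(fd, 'w') as fh:  # reuse the descriptor mkstemp opened
            json.dump(vmdef, fh)
    except Exception:
        os.unlink(path)                 # do not leave secrets behind on failure
        raise
    return path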
- options = dict( - state=dict( - default='running', - type='str', - choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted'] - ), - name=dict( - default=None, type='str', - aliases=['alias'] - ), - brand=dict( - default='joyent', - type='str', - choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve'] - ), - cpu_type=dict( - default='qemu64', - type='str', - choices=['host', 'qemu64'] - ), - # Regular strings, however these require additional options. - spice_password=dict(type='str', no_log=True), - vnc_password=dict(type='str', no_log=True), - disks=dict(type='list', elements='dict'), - nics=dict(type='list', elements='dict'), - resolvers=dict(type='list', elements='str'), - filesystems=dict(type='list', elements='dict'), - ) - - # Add our 'simple' options to options dict. - for type in properties: - for p in properties[type]: - option = dict(default=None, type=type) - options[p] = option - - module = AnsibleModule( - argument_spec=options, - supports_check_mode=True, - required_one_of=[['name', 'uuid']] - ) - - module.vmadm = module.get_bin_path('vmadm', required=True) - - p = module.params - uuid = p['uuid'] - state = p['state'] - - # Translate the state parameter into something we can use later on. - if state in ['present', 'running']: - vm_state = 'running' - elif state in ['stopped', 'created']: - vm_state = 'stopped' - elif state in ['absent', 'deleted']: - vm_state = 'deleted' - elif state in ['restarted', 'rebooted']: - vm_state = 'rebooted' - - result = {'state': state} - - # While it's possible to refer to a given VM by it's `alias`, it's easier - # to operate on VMs by their UUID. So if we're not given a `uuid`, look - # it up. - if not uuid: - uuid = get_vm_uuid(module, p['name']) - # Bit of a chicken and egg problem here for VMs with state == deleted. - # If they're going to be removed in this play, we have to lookup the - # uuid. If they're already deleted there's nothing to lookup. - # So if state == deleted and get_vm_uuid() returned '', the VM is already - # deleted and there's nothing else to do. - if uuid is None and vm_state == 'deleted': - result['name'] = p['name'] - module.exit_json(**result) - - validate_uuids(module) - - if p['name']: - result['name'] = p['name'] - result['uuid'] = uuid - - if uuid == '*': - result['changed'] = manage_all_vms(module, vm_state) - module.exit_json(**result) - - # The general flow is as follows: - # - first the current state of the VM is obtained by it's UUID. - # - If the state was not found and the desired state is 'deleted', return. - # - If the state was not found, it means the VM has to be created. - # Subsequently the VM will be set to the desired state (i.e. stopped) - # - Otherwise, it means the VM exists already and we operate on it's - # state (i.e. reboot it.) - # - # In the future it should be possible to query the VM for a particular - # property as a valid state (i.e. queried) so the result can be - # registered. - # Also, VMs should be able to get their properties updated. - # Managing VM snapshots should be part of a standalone module. - - # First obtain the VM state to determine what needs to be done with it. - current_vm_state = get_vm_prop(module, uuid, 'state') - - # First handle the case where the VM should be deleted and is not present. - if not current_vm_state and vm_state == 'deleted': - result['changed'] = False - elif module.check_mode: - # Shortcut for check mode, if there is no VM yet, it will need to be created. 
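# Sketch, for illustration only, of the state translation implemented above:
# the module's eight accepted states collapse onto four vmadm-level targets.
STATE_ALIASES = {
    'present': 'running', 'running': 'running',
    'created': 'stopped', 'stopped': 'stopped',
    'absent': 'deleted', 'deleted': 'deleted',
    'restarted': 'rebooted', 'rebooted': 'rebooted',
}
# e.g. STATE_ALIASES['present'] -> 'running'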
- # Or, if the VM is not in the desired state yet, it needs to transition. - if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state): - result['changed'] = True - else: - result['changed'] = False - - module.exit_json(**result) - # No VM was found that matched the given ID (alias or uuid), so we create it. - elif not current_vm_state: - result['changed'], result['uuid'] = new_vm(module, uuid, vm_state) - else: - # VM was found, operate on its state directly. - result['changed'] = vm_state_transition(module, uuid, vm_state) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py b/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py deleted file mode 100644 index 825d82e1..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py +++ /dev/null @@ -1,430 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sl_vm -short_description: create or cancel a virtual instance in SoftLayer -description: - - Creates or cancels SoftLayer instances. - - When created, optionally waits for it to be 'running'. -options: - instance_id: - description: - - Instance Id of the virtual instance to perform action option. - type: str - hostname: - description: - - Hostname to be provided to a virtual instance. - type: str - domain: - description: - - Domain name to be provided to a virtual instance. - type: str - datacenter: - description: - - Datacenter for the virtual instance to be deployed. - type: str - choices: - - ams01 - - ams03 - - che01 - - dal01 - - dal05 - - dal06 - - dal09 - - dal10 - - dal12 - - dal13 - - fra02 - - fra04 - - fra05 - - hkg02 - - hou02 - - lon02 - - lon04 - - lon06 - - mel01 - - mex01 - - mil01 - - mon01 - - osl01 - - par01 - - sao01 - - sea01 - - seo01 - - sjc01 - - sjc03 - - sjc04 - - sng01 - - syd01 - - syd04 - - tok02 - - tor01 - - wdc01 - - wdc04 - - wdc06 - - wdc07 - tags: - description: - - Tag or list of tags to be provided to a virtual instance. - type: str - hourly: - description: - - Flag to determine if the instance should be hourly billed. - type: bool - default: 'yes' - private: - description: - - Flag to determine if the instance should be private only. - type: bool - default: 'no' - dedicated: - description: - - Flag to determine if the instance should be deployed in dedicated space. - type: bool - default: 'no' - local_disk: - description: - - Flag to determine if local disk should be used for the new instance. - type: bool - default: 'yes' - cpus: - description: - - Count of cpus to be assigned to new virtual instance. - type: int - choices: [1, 2, 4, 8, 16, 32, 56] - memory: - description: - - Amount of memory to be assigned to new virtual instance. - type: int - choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] - flavor: - description: - - Specify which SoftLayer flavor template to use instead of cpus and memory. - version_added: '0.2.0' - type: str - disks: - description: - - List of disk sizes to be assigned to new virtual instance. - default: [ 25 ] - type: list - elements: int - os_code: - description: - - OS Code to be used for new virtual instance. 
- type: str - image_id: - description: - - Image Template to be used for new virtual instance. - type: str - nic_speed: - description: - - NIC Speed to be assigned to new virtual instance. - choices: [10, 100, 1000] - type: int - public_vlan: - description: - - VLAN by its Id to be assigned to the public NIC. - type: str - private_vlan: - description: - - VLAN by its Id to be assigned to the private NIC. - type: str - ssh_keys: - description: - - List of ssh keys by their Id to be assigned to a virtual instance. - type: list - elements: str - post_uri: - description: - - URL of a post provisioning script to be loaded and executed on virtual instance. - type: str - state: - description: - - Create, or cancel a virtual instance. - - Specify C(present) for create, C(absent) to cancel. - choices: [ absent, present ] - default: present - type: str - wait: - description: - - Flag used to wait for active status before returning. - type: bool - default: 'yes' - wait_time: - description: - - Time in seconds before wait returns. - default: 600 - type: int -requirements: - - python >= 2.6 - - softlayer >= 4.1.1 -author: -- Matt Colton (@mcltn) -''' - -EXAMPLES = ''' -- name: Build instance - hosts: localhost - gather_facts: no - tasks: - - name: Build instance request - community.general.sl_vm: - hostname: instance-1 - domain: anydomain.com - datacenter: dal09 - tags: ansible-module-test - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: [25] - os_code: UBUNTU_LATEST - wait: no - -- name: Build additional instances - hosts: localhost - gather_facts: no - tasks: - - name: Build instances request - community.general.sl_vm: - hostname: "{{ item.hostname }}" - domain: "{{ item.domain }}" - datacenter: "{{ item.datacenter }}" - tags: "{{ item.tags }}" - hourly: "{{ item.hourly }}" - private: "{{ item.private }}" - dedicated: "{{ item.dedicated }}" - local_disk: "{{ item.local_disk }}" - cpus: "{{ item.cpus }}" - memory: "{{ item.memory }}" - disks: "{{ item.disks }}" - os_code: "{{ item.os_code }}" - ssh_keys: "{{ item.ssh_keys }}" - wait: "{{ item.wait }}" - with_items: - - hostname: instance-2 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: True - - hostname: instance-3 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: yes - -- name: Cancel instances - hosts: localhost - gather_facts: no - tasks: - - name: Cancel by tag - community.general.sl_vm: - state: absent - tags: ansible-module-test -''' - -# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. 
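# A tiny sketch, not part of the module, of the tag normalization this module
# applies: the I(tags) option may arrive as a string or a list, while the
# SoftLayer API expects one comma-separated string.
def normalize_tags(tags):
    """Coerce a list of tags into the comma-separated form the API expects."""
    if isinstance(tags, list):
        return ','.join(map(str, tags))
    return tags

# e.g. normalize_tags(['ansible-module-test', 'replicas'])
#      -> 'ansible-module-test,replicas'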
-RETURN = '''# ''' - -import json -import time - -try: - import SoftLayer - from SoftLayer import VSManager - - HAS_SL = True - vsManager = VSManager(SoftLayer.create_client_from_env()) -except ImportError: - HAS_SL = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import string_types - - -# TODO: get this info from API -STATES = ['present', 'absent'] -DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02', - 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01', - 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04', - 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07'] -CPU_SIZES = [1, 2, 4, 8, 16, 32, 56] -MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] -INITIALDISK_SIZES = [25, 100] -LOCALDISK_SIZES = [25, 100, 150, 200, 300] -SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000] -NIC_SPEEDS = [10, 100, 1000] - - -def create_virtual_instance(module): - - instances = vsManager.list_instances( - hostname=module.params.get('hostname'), - domain=module.params.get('domain'), - datacenter=module.params.get('datacenter') - ) - - if instances: - return False, None - - # Check if OS or Image Template is provided (Can't be both, defaults to OS) - if (module.params.get('os_code') is not None and module.params.get('os_code') != ''): - module.params['image_id'] = '' - elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''): - module.params['os_code'] = '' - module.params['disks'] = [] # Blank out disks since it will use the template - else: - return False, None - - tags = module.params.get('tags') - if isinstance(tags, list): - tags = ','.join(map(str, module.params.get('tags'))) - - instance = vsManager.create_instance( - hostname=module.params.get('hostname'), - domain=module.params.get('domain'), - cpus=module.params.get('cpus'), - memory=module.params.get('memory'), - flavor=module.params.get('flavor'), - hourly=module.params.get('hourly'), - datacenter=module.params.get('datacenter'), - os_code=module.params.get('os_code'), - image_id=module.params.get('image_id'), - local_disk=module.params.get('local_disk'), - disks=module.params.get('disks'), - ssh_keys=module.params.get('ssh_keys'), - nic_speed=module.params.get('nic_speed'), - private=module.params.get('private'), - public_vlan=module.params.get('public_vlan'), - private_vlan=module.params.get('private_vlan'), - dedicated=module.params.get('dedicated'), - post_uri=module.params.get('post_uri'), - tags=tags, - ) - - if instance is not None and instance['id'] > 0: - return True, instance - else: - return False, None - - -def wait_for_instance(module, id): - instance = None - completed = False - wait_timeout = time.time() + module.params.get('wait_time') - while not completed and wait_timeout > time.time(): - try: - completed = vsManager.wait_for_ready(id, 10, 2) - if completed: - instance = vsManager.get_instance(id) - except Exception: - completed = False - - return completed, instance - - -def cancel_instance(module): - canceled = True - if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')): - tags = module.params.get('tags') - if isinstance(tags, string_types): - tags = [module.params.get('tags')] - instances = 
vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain')) - for instance in instances: - try: - vsManager.cancel_instance(instance['id']) - except Exception: - canceled = False - elif module.params.get('instance_id') and module.params.get('instance_id') != 0: - try: - vsManager.cancel_instance(instance['id']) - except Exception: - canceled = False - else: - return False, None - - return canceled, None - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - instance_id=dict(type='str'), - hostname=dict(type='str'), - domain=dict(type='str'), - datacenter=dict(type='str', choices=DATACENTERS), - tags=dict(type='str'), - hourly=dict(type='bool', default=True), - private=dict(type='bool', default=False), - dedicated=dict(type='bool', default=False), - local_disk=dict(type='bool', default=True), - cpus=dict(type='int', choices=CPU_SIZES), - memory=dict(type='int', choices=MEMORY_SIZES), - flavor=dict(type='str'), - disks=dict(type='list', elements='int', default=[25]), - os_code=dict(type='str'), - image_id=dict(type='str'), - nic_speed=dict(type='int', choices=NIC_SPEEDS), - public_vlan=dict(type='str'), - private_vlan=dict(type='str'), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - post_uri=dict(type='str'), - state=dict(type='str', default='present', choices=STATES), - wait=dict(type='bool', default=True), - wait_time=dict(type='int', default=600), - ) - ) - - if not HAS_SL: - module.fail_json(msg='softlayer python library required for this module') - - if module.params.get('state') == 'absent': - (changed, instance) = cancel_instance(module) - - elif module.params.get('state') == 'present': - (changed, instance) = create_virtual_instance(module) - if module.params.get('wait') is True and instance: - (changed, instance) = wait_for_instance(module, instance['id']) - - module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__))) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py deleted file mode 100644 index da8f0102..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ /dev/null @@ -1,1557 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -DOCUMENTATION = ''' ---- -module: spotinst_aws_elastigroup -short_description: Create, update or delete Spotinst AWS Elastigroups -author: Spotinst (@talzur) -description: - - Can create, update, or delete Spotinst AWS Elastigroups - Launch configuration is part of the elastigroup configuration, - so no additional modules are necessary for handling the launch configuration. - You will have to have a credentials file in this location - /.spotinst/credentials - The credentials file must contain a row that looks like this - token = - Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible- -requirements: - - python >= 2.7 - - spotinst_sdk >= 1.0.38 -options: - - credentials_path: - description: - - Optional parameter that allows to set a non-default credentials path. 
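# A generic sketch, assuming time.sleep-based polling (the original relies on
# the SDK's own retry interval), of the deadline loop the module above uses
# for wait/wait_time: poll until ready or until the deadline passes, ignoring
# transient API errors in between. wait_until is a hypothetical name.
import time

def wait_until(check, wait_time, interval=10):
    """Poll check() until it returns truthy or wait_time seconds elapse."""
    deadline = time.time() + wait_time
    while time.time() < deadline:
        try:
            if check():
                return True
        except Exception:
            pass  # transient API error; keep retrying until the deadline
        time.sleep(interval)
    return False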
- default: ~/.spotinst/credentials - type: path - - account_id: - description: - - Optional parameter that allows to set an account-id inside the module configuration. - By default this is retrieved from the credentials path. - type: str - - availability_vs_cost: - description: - - The strategy orientation. - - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)." - required: true - type: str - - availability_zones: - description: - - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - name (String), - subnet_id (String), - placement_group_name (String), - required: true - type: list - elements: dict - - block_device_mappings: - description: - - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; - You can specify virtual devices and EBS volumes.; - '[{"key":"value", "key":"value"}]'; - keys allowed are - device_name (List of Strings), - virtual_name (String), - no_device (String), - ebs (Object, expects the following keys- - delete_on_termination(Boolean), - encrypted(Boolean), - iops (Integer), - snapshot_id(Integer), - volume_type(String), - volume_size(Integer)) - type: list - elements: dict - - chef: - description: - - The Chef integration configuration.; - Expects the following keys - chef_server (String), - organization (String), - user (String), - pem_key (String), - chef_version (String) - type: dict - - draining_timeout: - description: - - Time for instance to be drained from incoming requests and deregistered from ELB before termination. - type: int - - ebs_optimized: - description: - - Enable EBS optimization for supported instances which are not enabled by default.; - Note - additional charges will be applied. - type: bool - - ebs_volume_pool: - description: - - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - volume_ids (List of Strings), - device_name (String) - type: list - elements: dict - - ecs: - description: - - The ECS integration configuration.; - Expects the following key - - cluster_name (String) - type: dict - - elastic_ips: - description: - - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances - type: list - elements: str - - fallback_to_od: - description: - - In case of no spots available, Elastigroup will launch an On-demand instance instead - type: bool - - health_check_grace_period: - description: - - The amount of time, in seconds, after the instance has launched to start and check its health. - - If not specified, it defaults to C(300). - type: int - - health_check_unhealthy_duration_before_replacement: - description: - - Minimal mount of time instance should be unhealthy for us to consider it unhealthy. - type: int - - health_check_type: - description: - - The service to use for the health check. - - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)." - type: str - - iam_role_name: - description: - - The instance profile iamRole name - - Only use iam_role_arn, or iam_role_name - type: str - - iam_role_arn: - description: - - The instance profile iamRole arn - - Only use iam_role_arn, or iam_role_name - type: str - - id: - description: - - The group id if it already exists and you want to update, or delete it. - This will not work unless the uniqueness_by field is set to id. 
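# Sketch, names illustrative, of the uniqueness resolution that the
# I(uniqueness_by) and I(id) options describe: an explicit id (with
# uniqueness_by=id) selects an existing group for update or delete, otherwise
# groups are matched by name and a miss means the group should be created.
def resolve_group(groups, name, uniqueness_by, external_id):
    """Return (should_create, group_id)."""
    if uniqueness_by == 'id':
        return (external_id is None), external_id
    for group in groups:
        if group['name'] == name:
            return False, group.get('id')
    return True, None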
- When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. - type: str - - image_id: - description: - - The image Id used to launch the instance.; - In case of conflict between Instance type and image type, an error will be returned - required: true - type: str - - key_pair: - description: - - Specify a Key Pair to attach to the instances - type: str - - kubernetes: - description: - - The Kubernetes integration configuration. - Expects the following keys - - api_server (String), - token (String) - type: dict - - lifetime_period: - description: - - Lifetime period - type: int - - load_balancers: - description: - - List of classic ELB names - type: list - elements: str - - max_size: - description: - - The upper limit number of instances that you can scale up to - required: true - type: int - - mesosphere: - description: - - The Mesosphere integration configuration. - Expects the following key - - api_server (String) - type: dict - - min_size: - description: - - The lower limit number of instances that you can scale down to - required: true - type: int - - monitoring: - description: - - Describes whether instance Enhanced Monitoring is enabled - type: str - - name: - description: - - Unique name for elastigroup to be created, updated or deleted - required: true - type: str - - network_interfaces: - description: - - A list of hash/dictionaries of network interfaces to add to the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - description (String), - device_index (Integer), - secondary_private_ip_address_count (Integer), - associate_public_ip_address (Boolean), - delete_on_termination (Boolean), - groups (List of Strings), - network_interface_id (String), - private_ip_address (String), - subnet_id (String), - associate_ipv6_address (Boolean), - private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) - type: list - elements: dict - - on_demand_count: - description: - - Required if risk is not set - - Number of on demand instances to launch. All other instances will be spot instances.; - Either set this parameter or the risk parameter - type: int - - on_demand_instance_type: - description: - - On-demand instance type that will be provisioned - type: str - - opsworks: - description: - - The elastigroup OpsWorks integration configration.; - Expects the following key - - layer_id (String) - type: dict - - persistence: - description: - - The Stateful elastigroup configration.; - Accepts the following keys - - should_persist_root_device (Boolean), - should_persist_block_devices (Boolean), - should_persist_private_ip (Boolean) - type: dict - - product: - description: - - Operation system type. - - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))." - required: true - type: str - - rancher: - description: - - The Rancher integration configuration.; - Expects the following keys - - version (String), - access_key (String), - secret_key (String), - master_host (String) - type: dict - - right_scale: - description: - - The Rightscale integration configuration.; - Expects the following keys - - account_id (String), - refresh_token (String) - type: dict - - risk: - description: - - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100). 
- type: int - - roll_config: - description: - - Roll configuration.; - If you would like the group to roll after updating, please use this feature. - Accepts the following keys - - batch_size_percentage(Integer, Required), - grace_period - (Integer, Required), - health_check_type(String, Optional) - type: dict - - scheduled_tasks: - description: - - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - adjustment (Integer), - scale_target_capacity (Integer), - scale_min_capacity (Integer), - scale_max_capacity (Integer), - adjustment_percentage (Integer), - batch_size_percentage (Integer), - cron_expression (String), - frequency (String), - grace_period (Integer), - task_type (String, required), - is_enabled (Boolean) - type: list - elements: dict - - security_group_ids: - description: - - One or more security group IDs. ; - In case of update it will override the existing Security Group with the new given array - required: true - type: list - elements: str - - shutdown_script: - description: - - The Base64-encoded shutdown script that executes prior to instance termination. - Encode before setting. - type: str - - signals: - description: - - A list of hash/dictionaries of signals to configure in the elastigroup; - keys allowed are - - name (String, required), - timeout (Integer) - type: list - elements: dict - - spin_up_time: - description: - - Spin up time, in seconds, for the instance - type: int - - spot_instance_types: - description: - - Spot instance type that will be provisioned. - required: true - type: list - elements: str - - state: - choices: - - present - - absent - description: - - Create or delete the elastigroup - default: present - type: str - - tags: - description: - - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); - type: list - elements: dict - - target: - description: - - The number of instances to launch - required: true - type: int - - target_group_arns: - description: - - List of target group arns instances should be registered to - type: list - elements: str - - tenancy: - description: - - Dedicated vs shared tenancy. - - "The available choices are: C(default), C(dedicated)." - type: str - - terminate_at_end_of_billing_hour: - description: - - Terminate at the end of billing hour - type: bool - - unit: - description: - - The capacity unit to launch instances by. - - "The available choices are: C(instance), C(weight)." 
- type: str - - up_scaling_policies: - description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions (List of Objects, Keys allowed are name (String, required) and value (String)), - statistic (String, required) - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - min_target_capacity (String), - target (String), - maximum (String), - minimum (String) - type: list - elements: dict - - down_scaling_policies: - description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)), - statistic (String, required), - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - max_target_capacity (String), - target (String), - maximum (String), - minimum (String) - type: list - elements: dict - - target_tracking_policies: - description: - - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - source (String, required), - metric_name (String, required), - statistic (String, required), - unit (String, required), - cooldown (String, required), - target (String, required) - type: list - elements: dict - - uniqueness_by: - choices: - - id - - name - description: - - If your group names are not unique, you may use this feature to update or delete a specific group. - Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. - default: name - type: str - - user_data: - description: - - Base64-encoded MIME user data. Encode before setting the value. - type: str - - utilize_reserved_instances: - description: - - In case of any available Reserved Instances, - Elastigroup will utilize your reservations before purchasing Spot instances. - type: bool - - wait_for_instances: - description: - - Whether or not the elastigroup creation / update actions should wait for the instances to spin - type: bool - default: false - - wait_timeout: - description: - - How long the module should wait for instances before failing the action.; - Only works if wait_for_instances is True. 
- type: int - -''' -EXAMPLES = ''' -# Basic configuration YAML example - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - -# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/sda1' - ebs: - volume_size: 100 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: True - wait_timeout: 600 - register: result - - - name: Store private ips to file - ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips - with_items: "{{ result.instances }}" - - ansible.builtin.debug: var=result - -# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id -# In organizations with more than one account, it is required to specify an account_id - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/xvda' - ebs: - volume_size: 60 - volume_type: gp2 - - device_name: '/dev/xvdb' - ebs: - volume_size: 120 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: True - wait_timeout: 600 - register: result - - - name: Store private ips to file - ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips - with_items: "{{ result.instances }}" - - ansible.builtin.debug: var=result - -# In this example we have set up block device mapping with ephemeral devices - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - 
unit: instance - block_device_mappings: - - device_name: '/dev/xvda' - virtual_name: ephemeral0 - - device_name: '/dev/xvdb/' - virtual_name: ephemeral1 - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - -# In this example we create a basic group configuration with a network interface defined. -# Each network interface must have a device index - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - network_interfaces: - - associate_public_ip_address: true - device_index: 0 - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - - -# In this example we create a basic group configuration with a target tracking scaling policy defined - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - account_id: act-92d45673 - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-79da021e - image_id: ami-f173cc91 - fallback_to_od: true - tags: - - Creator: ValueOfCreatorTag - - Environment: ValueOfEnvironmentTag - key_pair: spotinst-labs-oregon - max_size: 10 - min_size: 0 - target: 2 - unit: instance - monitoring: True - name: ansible-group-1 - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-46cdc13d - spot_instance_types: - - c3.large - target_tracking_policies: - - policy_name: target-tracking-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - target: 50 - cooldown: 120 - do_not_update: - - image_id - register: result - - ansible.builtin.debug: var=result -''' - -RETURN = ''' ---- -instances: - description: List of active elastigroup instances and their details. - returned: success - type: dict - sample: [ - { - "spotInstanceRequestId": "sir-regs25zp", - "instanceId": "i-09640ad8678234c", - "instanceType": "m4.large", - "product": "Linux/UNIX", - "availabilityZone": "us-west-2b", - "privateIp": "180.0.2.244", - "createdAt": "2017-07-17T12:46:18.000Z", - "status": "fulfilled" - } - ] -group_id: - description: Created / Updated group's ID. 
- returned: success - type: str - sample: "sig-12345" - -''' - -HAS_SPOTINST_SDK = False -__metaclass__ = type - -import os -import time -from ansible.module_utils.basic import AnsibleModule - -try: - import spotinst_sdk as spotinst - from spotinst_sdk import SpotinstClientException - - HAS_SPOTINST_SDK = True - -except ImportError: - pass - -eni_fields = ('description', - 'device_index', - 'secondary_private_ip_address_count', - 'associate_public_ip_address', - 'delete_on_termination', - 'groups', - 'network_interface_id', - 'private_ip_address', - 'subnet_id', - 'associate_ipv6_address') - -private_ip_fields = ('private_ip_address', - 'primary') - -capacity_fields = (dict(ansible_field_name='min_size', - spotinst_field_name='minimum'), - dict(ansible_field_name='max_size', - spotinst_field_name='maximum'), - 'target', - 'unit') - -lspec_fields = ('user_data', - 'key_pair', - 'tenancy', - 'shutdown_script', - 'monitoring', - 'ebs_optimized', - 'image_id', - 'health_check_type', - 'health_check_grace_period', - 'health_check_unhealthy_duration_before_replacement', - 'security_group_ids') - -iam_fields = (dict(ansible_field_name='iam_role_name', - spotinst_field_name='name'), - dict(ansible_field_name='iam_role_arn', - spotinst_field_name='arn')) - -scheduled_task_fields = ('adjustment', - 'adjustment_percentage', - 'batch_size_percentage', - 'cron_expression', - 'frequency', - 'grace_period', - 'task_type', - 'is_enabled', - 'scale_target_capacity', - 'scale_min_capacity', - 'scale_max_capacity') - -scaling_policy_fields = ('policy_name', - 'namespace', - 'metric_name', - 'dimensions', - 'statistic', - 'evaluation_periods', - 'period', - 'threshold', - 'cooldown', - 'unit', - 'operator') - -tracking_policy_fields = ('policy_name', - 'namespace', - 'source', - 'metric_name', - 'statistic', - 'unit', - 'cooldown', - 'target', - 'threshold') - -action_fields = (dict(ansible_field_name='action_type', - spotinst_field_name='type'), - 'adjustment', - 'min_target_capacity', - 'max_target_capacity', - 'target', - 'minimum', - 'maximum') - -signal_fields = ('name', - 'timeout') - -multai_lb_fields = ('balancer_id', - 'project_id', - 'target_set_id', - 'az_awareness', - 'auto_weight') - -persistence_fields = ('should_persist_root_device', - 'should_persist_block_devices', - 'should_persist_private_ip') - -strategy_fields = ('risk', - 'utilize_reserved_instances', - 'fallback_to_od', - 'on_demand_count', - 'availability_vs_cost', - 'draining_timeout', - 'spin_up_time', - 'lifetime_period') - -ebs_fields = ('delete_on_termination', - 'encrypted', - 'iops', - 'snapshot_id', - 'volume_type', - 'volume_size') - -bdm_fields = ('device_name', - 'virtual_name', - 'no_device') - -kubernetes_fields = ('api_server', - 'token') - -right_scale_fields = ('account_id', - 'refresh_token') - -rancher_fields = ('access_key', - 'secret_key', - 'master_host', - 'version') - -chef_fields = ('chef_server', - 'organization', - 'user', - 'pem_key', - 'chef_version') - -az_fields = ('name', - 'subnet_id', - 'placement_group_name') - -opsworks_fields = ('layer_id',) - -scaling_strategy_fields = ('terminate_at_end_of_billing_hour',) - -mesosphere_fields = ('api_server',) - -ecs_fields = ('cluster_name',) - -multai_fields = ('multai_token',) - - -def handle_elastigroup(client, module): - has_changed = False - group_id = None - message = 'None' - - name = module.params.get('name') - state = module.params.get('state') - uniqueness_by = module.params.get('uniqueness_by') - external_group_id = module.params.get('id') - - if 
uniqueness_by == 'id': - if external_group_id is None: - should_create = True - else: - should_create = False - group_id = external_group_id - else: - groups = client.get_elastigroups() - should_create, group_id = find_group_with_same_name(groups, name) - - if should_create is True: - if state == 'present': - eg = expand_elastigroup(module, is_update=False) - module.debug(str(" [INFO] " + message + "\n")) - group = client.create_elastigroup(group=eg) - group_id = group['id'] - message = 'Created group Successfully.' - has_changed = True - - elif state == 'absent': - message = 'Cannot delete non-existent group.' - has_changed = False - else: - eg = expand_elastigroup(module, is_update=True) - - if state == 'present': - group = client.update_elastigroup(group_update=eg, group_id=group_id) - message = 'Updated group successfully.' - - try: - roll_config = module.params.get('roll_config') - if roll_config: - eg_roll = spotinst.aws_elastigroup.Roll( - batch_size_percentage=roll_config.get('batch_size_percentage'), - grace_period=roll_config.get('grace_period'), - health_check_type=roll_config.get('health_check_type') - ) - roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id) - message = 'Updated and started rolling the group successfully.' - - except SpotinstClientException as exc: - message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc) - has_changed = True - - elif state == 'absent': - try: - client.delete_elastigroup(group_id=group_id) - except SpotinstClientException as exc: - if "GROUP_DOESNT_EXIST" in exc.message: - pass - else: - module.fail_json(msg="Error while attempting to delete group : " + exc.message) - - message = 'Deleted group successfully.' - has_changed = True - - return group_id, message, has_changed - - -def retrieve_group_instances(client, module, group_id): - wait_timeout = module.params.get('wait_timeout') - wait_for_instances = module.params.get('wait_for_instances') - - health_check_type = module.params.get('health_check_type') - - if wait_timeout is None: - wait_timeout = 300 - - wait_timeout = time.time() + wait_timeout - target = module.params.get('target') - state = module.params.get('state') - instances = list() - - if state == 'present' and group_id is not None and wait_for_instances is True: - - is_amount_fulfilled = False - while is_amount_fulfilled is False and wait_timeout > time.time(): - instances = list() - amount_of_fulfilled_instances = 0 - - if health_check_type is not None: - healthy_instances = client.get_instance_healthiness(group_id=group_id) - - for healthy_instance in healthy_instances: - if healthy_instance.get('healthStatus') == 'HEALTHY': - amount_of_fulfilled_instances += 1 - instances.append(healthy_instance) - - else: - active_instances = client.get_elastigroup_active_instances(group_id=group_id) - - for active_instance in active_instances: - if active_instance.get('private_ip') is not None: - amount_of_fulfilled_instances += 1 - instances.append(active_instance) - - if amount_of_fulfilled_instances >= target: - is_amount_fulfilled = True - - time.sleep(10) - - return instances - - -def find_group_with_same_name(groups, name): - for group in groups: - if group['name'] == name: - return False, group.get('id') - - return True, None - - -def expand_elastigroup(module, is_update): - do_not_update = module.params['do_not_update'] - name = module.params.get('name') - - eg = spotinst.aws_elastigroup.Elastigroup() - description = module.params.get('description') - - if name is not None: - eg.name = 
name - if description is not None: - eg.description = description - - # Capacity - expand_capacity(eg, module, is_update, do_not_update) - # Strategy - expand_strategy(eg, module) - # Scaling - expand_scaling(eg, module) - # Third party integrations - expand_integrations(eg, module) - # Compute - expand_compute(eg, module, is_update, do_not_update) - # Multai - expand_multai(eg, module) - # Scheduling - expand_scheduled_tasks(eg, module) - - return eg - - -def expand_compute(eg, module, is_update, do_not_update): - elastic_ips = module.params['elastic_ips'] - on_demand_instance_type = module.params.get('on_demand_instance_type') - spot_instance_types = module.params['spot_instance_types'] - ebs_volume_pool = module.params['ebs_volume_pool'] - availability_zones_list = module.params['availability_zones'] - product = module.params.get('product') - - eg_compute = spotinst.aws_elastigroup.Compute() - - if product is not None: - # Only put product on group creation - if is_update is not True: - eg_compute.product = product - - if elastic_ips is not None: - eg_compute.elastic_ips = elastic_ips - - if on_demand_instance_type or spot_instance_types is not None: - eg_instance_types = spotinst.aws_elastigroup.InstanceTypes() - - if on_demand_instance_type is not None: - eg_instance_types.spot = spot_instance_types - if spot_instance_types is not None: - eg_instance_types.ondemand = on_demand_instance_type - - if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None: - eg_compute.instance_types = eg_instance_types - - expand_ebs_volume_pool(eg_compute, ebs_volume_pool) - - eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone') - - expand_launch_spec(eg_compute, module, is_update, do_not_update) - - eg.compute = eg_compute - - -def expand_ebs_volume_pool(eg_compute, ebs_volumes_list): - if ebs_volumes_list is not None: - eg_volumes = [] - - for volume in ebs_volumes_list: - eg_volume = spotinst.aws_elastigroup.EbsVolume() - - if volume.get('device_name') is not None: - eg_volume.device_name = volume.get('device_name') - if volume.get('volume_ids') is not None: - eg_volume.volume_ids = volume.get('volume_ids') - - if eg_volume.device_name is not None: - eg_volumes.append(eg_volume) - - if len(eg_volumes) > 0: - eg_compute.ebs_volume_pool = eg_volumes - - -def expand_launch_spec(eg_compute, module, is_update, do_not_update): - eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification') - - if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None: - eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole') - - tags = module.params['tags'] - load_balancers = module.params['load_balancers'] - target_group_arns = module.params['target_group_arns'] - block_device_mappings = module.params['block_device_mappings'] - network_interfaces = module.params['network_interfaces'] - - if is_update is True: - if 'image_id' in do_not_update: - delattr(eg_launch_spec, 'image_id') - - expand_tags(eg_launch_spec, tags) - - expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns) - - expand_block_device_mappings(eg_launch_spec, block_device_mappings) - - expand_network_interfaces(eg_launch_spec, network_interfaces) - - eg_compute.launch_specification = eg_launch_spec - - -def expand_integrations(eg, module): - rancher = module.params.get('rancher') - mesosphere = module.params.get('mesosphere') - ecs = module.params.get('ecs') - kubernetes = 
module.params.get('kubernetes') - right_scale = module.params.get('right_scale') - opsworks = module.params.get('opsworks') - chef = module.params.get('chef') - - integration_exists = False - - eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations() - - if mesosphere is not None: - eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere') - integration_exists = True - - if ecs is not None: - eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration') - integration_exists = True - - if kubernetes is not None: - eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration') - integration_exists = True - - if right_scale is not None: - eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration') - integration_exists = True - - if opsworks is not None: - eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration') - integration_exists = True - - if rancher is not None: - eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher') - integration_exists = True - - if chef is not None: - eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration') - integration_exists = True - - if integration_exists: - eg.third_parties_integration = eg_integrations - - -def expand_capacity(eg, module, is_update, do_not_update): - eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity') - - if is_update is True: - delattr(eg_capacity, 'unit') - - if 'target' in do_not_update: - delattr(eg_capacity, 'target') - - eg.capacity = eg_capacity - - -def expand_strategy(eg, module): - persistence = module.params.get('persistence') - signals = module.params.get('signals') - - eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy') - - terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour') - - if terminate_at_end_of_billing_hour is not None: - eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, - module.params, 'ScalingStrategy') - - if persistence is not None: - eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence') - - if signals is not None: - eg_signals = expand_list(signals, signal_fields, 'Signal') - - if len(eg_signals) > 0: - eg_strategy.signals = eg_signals - - eg.strategy = eg_strategy - - -def expand_multai(eg, module): - multai_load_balancers = module.params.get('multai_load_balancers') - - eg_multai = expand_fields(multai_fields, module.params, 'Multai') - - if multai_load_balancers is not None: - eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer') - - if len(eg_multai_load_balancers) > 0: - eg_multai.balancers = eg_multai_load_balancers - eg.multai = eg_multai - - -def expand_scheduled_tasks(eg, module): - scheduled_tasks = module.params.get('scheduled_tasks') - - if scheduled_tasks is not None: - eg_scheduling = spotinst.aws_elastigroup.Scheduling() - - eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask') - - if len(eg_tasks) > 0: - eg_scheduling.tasks = eg_tasks - eg.scheduling = eg_scheduling - - -def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns): - if load_balancers is not None or target_group_arns is not None: - eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig() - eg_total_lbs = [] - - if load_balancers is not None: - for elb_name in load_balancers: - eg_elb = 
spotinst.aws_elastigroup.LoadBalancer() - if elb_name is not None: - eg_elb.name = elb_name - eg_elb.type = 'CLASSIC' - eg_total_lbs.append(eg_elb) - - if target_group_arns is not None: - for target_arn in target_group_arns: - eg_elb = spotinst.aws_elastigroup.LoadBalancer() - if target_arn is not None: - eg_elb.arn = target_arn - eg_elb.type = 'TARGET_GROUP' - eg_total_lbs.append(eg_elb) - - if len(eg_total_lbs) > 0: - eg_load_balancers_config.load_balancers = eg_total_lbs - eg_launchspec.load_balancers_config = eg_load_balancers_config - - -def expand_tags(eg_launchspec, tags): - if tags is not None: - eg_tags = [] - - for tag in tags: - eg_tag = spotinst.aws_elastigroup.Tag() - if tag: - eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0] - - eg_tags.append(eg_tag) - - if len(eg_tags) > 0: - eg_launchspec.tags = eg_tags - - -def expand_block_device_mappings(eg_launchspec, bdms): - if bdms is not None: - eg_bdms = [] - - for bdm in bdms: - eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping') - - if bdm.get('ebs') is not None: - eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS') - - eg_bdms.append(eg_bdm) - - if len(eg_bdms) > 0: - eg_launchspec.block_device_mappings = eg_bdms - - -def expand_network_interfaces(eg_launchspec, enis): - if enis is not None: - eg_enis = [] - - for eni in enis: - eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface') - - eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress') - - if eg_pias is not None: - eg_eni.private_ip_addresses = eg_pias - - eg_enis.append(eg_eni) - - if len(eg_enis) > 0: - eg_launchspec.network_interfaces = eg_enis - - -def expand_scaling(eg, module): - up_scaling_policies = module.params['up_scaling_policies'] - down_scaling_policies = module.params['down_scaling_policies'] - target_tracking_policies = module.params['target_tracking_policies'] - - eg_scaling = spotinst.aws_elastigroup.Scaling() - - if up_scaling_policies is not None: - eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies) - if len(eg_up_scaling_policies) > 0: - eg_scaling.up = eg_up_scaling_policies - - if down_scaling_policies is not None: - eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies) - if len(eg_down_scaling_policies) > 0: - eg_scaling.down = eg_down_scaling_policies - - if target_tracking_policies is not None: - eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies) - if len(eg_target_tracking_policies) > 0: - eg_scaling.target = eg_target_tracking_policies - - if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None: - eg.scaling = eg_scaling - - -def expand_list(items, fields, class_name): - if items is not None: - new_objects_list = [] - for item in items: - new_obj = expand_fields(fields, item, class_name) - new_objects_list.append(new_obj) - - return new_objects_list - - -def expand_fields(fields, item, class_name): - class_ = getattr(spotinst.aws_elastigroup, class_name) - new_obj = class_() - - # Handle primitive fields - if item is not None: - for field in fields: - if isinstance(field, dict): - ansible_field_name = field['ansible_field_name'] - spotinst_field_name = field['spotinst_field_name'] - else: - ansible_field_name = field - spotinst_field_name = field - if item.get(ansible_field_name) is not None: - setattr(new_obj, spotinst_field_name, item.get(ansible_field_name)) - - return new_obj - - -def expand_scaling_policies(scaling_policies): - eg_scaling_policies = [] - - for 
policy in scaling_policies: - eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy') - eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction') - eg_scaling_policies.append(eg_policy) - - return eg_scaling_policies - - -def expand_target_tracking_policies(tracking_policies): - eg_tracking_policies = [] - - for policy in tracking_policies: - eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy') - eg_tracking_policies.append(eg_policy) - - return eg_tracking_policies - - -def main(): - fields = dict( - account_id=dict(type='str'), - availability_vs_cost=dict(type='str', required=True), - availability_zones=dict(type='list', elements='dict', required=True), - block_device_mappings=dict(type='list', elements='dict'), - chef=dict(type='dict'), - credentials_path=dict(type='path', default="~/.spotinst/credentials"), - do_not_update=dict(default=[], type='list'), - down_scaling_policies=dict(type='list', elements='dict'), - draining_timeout=dict(type='int'), - ebs_optimized=dict(type='bool'), - ebs_volume_pool=dict(type='list', elements='dict'), - ecs=dict(type='dict'), - elastic_beanstalk=dict(type='dict'), - elastic_ips=dict(type='list', elements='str'), - fallback_to_od=dict(type='bool'), - id=dict(type='str'), - health_check_grace_period=dict(type='int'), - health_check_type=dict(type='str'), - health_check_unhealthy_duration_before_replacement=dict(type='int'), - iam_role_arn=dict(type='str'), - iam_role_name=dict(type='str'), - image_id=dict(type='str', required=True), - key_pair=dict(type='str', no_log=False), - kubernetes=dict(type='dict'), - lifetime_period=dict(type='int'), - load_balancers=dict(type='list', elements='str'), - max_size=dict(type='int', required=True), - mesosphere=dict(type='dict'), - min_size=dict(type='int', required=True), - monitoring=dict(type='str'), - multai_load_balancers=dict(type='list'), - multai_token=dict(type='str', no_log=True), - name=dict(type='str', required=True), - network_interfaces=dict(type='list', elements='dict'), - on_demand_count=dict(type='int'), - on_demand_instance_type=dict(type='str'), - opsworks=dict(type='dict'), - persistence=dict(type='dict'), - product=dict(type='str', required=True), - rancher=dict(type='dict'), - right_scale=dict(type='dict'), - risk=dict(type='int'), - roll_config=dict(type='dict'), - scheduled_tasks=dict(type='list', elements='dict'), - security_group_ids=dict(type='list', elements='str', required=True), - shutdown_script=dict(type='str'), - signals=dict(type='list', elements='dict'), - spin_up_time=dict(type='int'), - spot_instance_types=dict(type='list', elements='str', required=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list', elements='dict'), - target=dict(type='int', required=True), - target_group_arns=dict(type='list', elements='str'), - tenancy=dict(type='str'), - terminate_at_end_of_billing_hour=dict(type='bool'), - token=dict(type='str', no_log=True), - unit=dict(type='str'), - user_data=dict(type='str'), - utilize_reserved_instances=dict(type='bool'), - uniqueness_by=dict(default='name', choices=['name', 'id']), - up_scaling_policies=dict(type='list', elements='dict'), - target_tracking_policies=dict(type='list', elements='dict'), - wait_for_instances=dict(type='bool', default=False), - wait_timeout=dict(type='int') - ) - - module = AnsibleModule(argument_spec=fields) - - if not HAS_SPOTINST_SDK: - module.fail_json(msg="the Spotinst SDK library is required. 
(pip install spotinst_sdk)") - - # Retrieve creds file variables - creds_file_loaded_vars = dict() - - credentials_path = module.params.get('credentials_path') - - try: - with open(credentials_path, "r") as creds: - for line in creds: - eq_index = line.find('=') - var_name = line[:eq_index].strip() - string_value = line[eq_index + 1:].strip() - creds_file_loaded_vars[var_name] = string_value - except IOError: - pass - # End of creds file retrieval - - token = module.params.get('token') - if not token: - token = os.environ.get('SPOTINST_TOKEN') - if not token: - token = creds_file_loaded_vars.get("token") - - account = module.params.get('account_id') - if not account: - account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT') - if not account: - account = creds_file_loaded_vars.get("account") - - client = spotinst.SpotinstClient(auth_token=token, print_output=False) - - if account is not None: - client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account) - - group_id, message, has_changed = handle_elastigroup(client=client, module=module) - - instances = retrieve_group_instances(client=client, module=module, group_id=group_id) - - module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py deleted file mode 100644 index 4e7aa70b..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: udm_dns_record -author: -- Tobias Rüetschi (@keachi) -short_description: Manage dns entries on a univention corporate server -description: - - "This module allows to manage dns records on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 - - Univention - - ipaddress (for I(type=ptr_record)) -options: - state: - type: str - default: "present" - choices: [ present, absent ] - description: - - Whether the dns record is present or not. - name: - type: str - required: true - description: - - "Name of the record, this is also the DNS record. E.g. www for - www.example.com." - - For PTR records this has to be the IP address. - zone: - type: str - required: true - description: - - Corresponding DNS zone for this record, e.g. example.com. - - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)). - type: - type: str - required: true - description: - - "Define the record type. C(host_record) is a A or AAAA record, - C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record) - is a SRV record and C(txt_record) is a TXT record." - - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)." - data: - type: dict - default: {} - description: - - "Additional data for this record, e.g. ['a': '192.0.2.1']. - Required if C(state=present)." 
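-            - "For a C(host_record) this is a mapping of record types to value
-               lists, for example C({'a': ['192.0.2.1', '2001:0db8::42']}), as
-               shown in the examples below."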
-''' - - -EXAMPLES = ''' -- name: Create a DNS record on a UCS - community.general.udm_dns_record: - name: www - zone: example.com - type: host_record - data: - a: - - 192.0.2.1 - - 2001:0db8::42 - -- name: Create a DNS v4 PTR record on a UCS - community.general.udm_dns_record: - name: 192.0.2.1 - zone: 2.0.192.in-addr.arpa - type: ptr_record - data: - ptr_record: "www.example.com." - -- name: Create a DNS v6 PTR record on a UCS - community.general.udm_dns_record: - name: 2001:db8:0:0:0:ff00:42:8329 - zone: 2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa - type: ptr_record - data: - ptr_record: "www.example.com." -''' - - -RETURN = '''#''' - -HAVE_UNIVENTION = False -HAVE_IPADDRESS = False -try: - from univention.admin.handlers.dns import ( - forward_zone, - reverse_zone, - ) - HAVE_UNIVENTION = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.basic import missing_required_lib -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, - config, - uldap, -) -try: - import ipaddress - HAVE_IPADDRESS = True -except ImportError: - pass - - -def main(): - module = AnsibleModule( - argument_spec=dict( - type=dict(required=True, - type='str'), - zone=dict(required=True, - type='str'), - name=dict(required=True, - type='str'), - data=dict(default={}, - type='dict'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True, - required_if=([ - ('state', 'present', ['data']) - ]) - ) - - if not HAVE_UNIVENTION: - module.fail_json(msg="This module requires univention python bindings") - - type = module.params['type'] - zone = module.params['zone'] - name = module.params['name'] - data = module.params['data'] - state = module.params['state'] - changed = False - diff = None - - workname = name - if type == 'ptr_record': - if not HAVE_IPADDRESS: - module.fail_json(msg=missing_required_lib('ipaddress')) - try: - if 'arpa' not in zone: - raise Exception("Zone must be reversed zone for ptr_record. (e.g. 
1.1.192.in-addr.arpa)") - ipaddr_rev = ipaddress.ip_address(name).reverse_pointer - subnet_offset = ipaddr_rev.find(zone) - if subnet_offset == -1: - raise Exception("reversed IP address {0} is not part of zone.".format(ipaddr_rev)) - workname = ipaddr_rev[0:subnet_offset - 1] - except Exception as e: - module.fail_json( - msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e) - ) - - obj = list(ldap_search( - '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname), - attr=['dNSZone'] - )) - exists = bool(len(obj)) - container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn()) - dn = 'relativeDomainName={0},{1}'.format(workname, container) - - if state == 'present': - try: - if not exists: - so = forward_zone.lookup( - config(), - uldap(), - '(zone={0})'.format(zone), - scope='domain', - ) or reverse_zone.lookup( - config(), - uldap(), - '(zoneName={0})'.format(zone), - scope='domain', - ) - if len(so) == 0: - raise Exception("Did not find zone '{0}' in Univention".format(zone)) - obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0]) - else: - obj = umc_module_for_edit('dns/{0}'.format(type), dn) - - if type == 'ptr_record': - obj['ip'] = name - obj['address'] = workname - else: - obj['name'] = name - - for k, v in data.items(): - obj[k] = v - diff = obj.diff() - changed = obj.diff() != [] - if not module.check_mode: - if not exists: - obj.create() - else: - obj.modify() - except Exception as e: - module.fail_json( - msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e) - ) - - if state == 'absent' and exists: - try: - obj = umc_module_for_edit('dns/{0}'.format(type), dn) - if not module.check_mode: - obj.remove() - changed = True - except Exception as e: - module.fail_json( - msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e) - ) - - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py deleted file mode 100644 index f1cea87e..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: udm_dns_zone -author: -- Tobias Rüetschi (@keachi) -short_description: Manage dns zones on a univention corporate server -description: - - "This module allows to manage dns zones on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 -options: - state: - type: str - default: "present" - choices: [ present, absent ] - description: - - Whether the dns zone is present or not. - type: - type: str - required: true - description: - - Define if the zone is a forward or reverse DNS zone. - - "The available choices are: C(forward_zone), C(reverse_zone)." - zone: - type: str - required: true - description: - - DNS zone name, e.g. C(example.com). 
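-            - For a reverse zone, this is the full C(in-addr.arpa) or C(ip6.arpa)
-              name (compare the PTR examples in M(community.general.udm_dns_record)).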
- aliases: [name] - nameserver: - type: list - elements: str - description: - - List of appropriate name servers. Required if C(state=present). - interfaces: - type: list - elements: str - description: - - List of interface IP addresses, on which the server should - response this zone. Required if C(state=present). - - refresh: - type: int - default: 3600 - description: - - Interval before the zone should be refreshed. - retry: - type: int - default: 1800 - description: - - Interval that should elapse before a failed refresh should be retried. - expire: - type: int - default: 604800 - description: - - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. - ttl: - type: int - default: 600 - description: - - Minimum TTL field that should be exported with any RR from this zone. - - contact: - type: str - default: '' - description: - - Contact person in the SOA record. - mx: - type: list - elements: str - default: [] - description: - - List of MX servers. (Must declared as A or AAAA records). -''' - - -EXAMPLES = ''' -- name: Create a DNS zone on a UCS - community.general.udm_dns_zone: - zone: example.com - type: forward_zone - nameserver: - - ucs.example.com - interfaces: - - 192.0.2.1 -''' - - -RETURN = '''# ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, -) - - -def convert_time(time): - """Convert a time in seconds into the biggest unit""" - units = [ - (24 * 60 * 60, 'days'), - (60 * 60, 'hours'), - (60, 'minutes'), - (1, 'seconds'), - ] - - if time == 0: - return ('0', 'seconds') - for unit in units: - if time >= unit[0]: - return ('{0}'.format(time // unit[0]), unit[1]) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - type=dict(required=True, - type='str'), - zone=dict(required=True, - aliases=['name'], - type='str'), - nameserver=dict(default=[], - type='list', - elements='str'), - interfaces=dict(default=[], - type='list', - elements='str'), - refresh=dict(default=3600, - type='int'), - retry=dict(default=1800, - type='int'), - expire=dict(default=604800, - type='int'), - ttl=dict(default=600, - type='int'), - contact=dict(default='', - type='str'), - mx=dict(default=[], - type='list', - elements='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True, - required_if=([ - ('state', 'present', ['nameserver', 'interfaces']) - ]) - ) - type = module.params['type'] - zone = module.params['zone'] - nameserver = module.params['nameserver'] - interfaces = module.params['interfaces'] - refresh = module.params['refresh'] - retry = module.params['retry'] - expire = module.params['expire'] - ttl = module.params['ttl'] - contact = module.params['contact'] - mx = module.params['mx'] - state = module.params['state'] - changed = False - diff = None - - obj = list(ldap_search( - '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone), - attr=['dNSZone'] - )) - - exists = bool(len(obj)) - container = 'cn=dns,{0}'.format(base_dn()) - dn = 'zoneName={0},{1}'.format(zone, container) - if contact == '': - contact = 'root@{0}.'.format(zone) - - if state == 'present': - try: - if not exists: - obj = umc_module_for_add('dns/{0}'.format(type), container) - else: - obj = umc_module_for_edit('dns/{0}'.format(type), dn) - obj['zone'] = zone - obj['nameserver'] = nameserver - obj['a'] = interfaces - obj['refresh'] = 
convert_time(refresh)
-            obj['retry'] = convert_time(retry)
-            obj['expire'] = convert_time(expire)
-            obj['ttl'] = convert_time(ttl)
-            obj['contact'] = contact
-            obj['mx'] = mx
-            diff = obj.diff()
-            if exists:
-                for k in obj.keys():
-                    if obj.hasChanged(k):
-                        changed = True
-            else:
-                changed = True
-            if not module.check_mode:
-                if not exists:
-                    obj.create()
-                elif changed:
-                    obj.modify()
-        except Exception as e:
-            module.fail_json(
-                msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
-            )
-
-    if state == 'absent' and exists:
-        try:
-            obj = umc_module_for_edit('dns/{0}'.format(type), dn)
-            if not module.check_mode:
-                obj.remove()
-            changed = True
-        except Exception as e:
-            module.fail_json(
-                msg='Removing dns zone {0} failed: {1}'.format(zone, e)
-            )
-
-    module.exit_json(
-        changed=changed,
-        diff=diff,
-        zone=zone
-    )
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py
deleted file mode 100644
index d20187c6..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Adfinis SyGroup AG
-# Tobias Rueetschi
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: udm_group
-author:
-- Tobias Rüetschi (@keachi)
-short_description: Manage posix groups on a univention corporate server
-description:
-    - "This module allows managing user groups on a univention corporate server (UCS).
-       It uses the Python API of the UCS to create a new object or edit it."
-requirements:
-    - Python >= 2.6
-options:
-    state:
-        required: false
-        default: "present"
-        choices: [ present, absent ]
-        description:
-            - Whether the group is present or not.
-        type: str
-    name:
-        required: true
-        description:
-            - Name of the posix group.
-        type: str
-    description:
-        required: false
-        description:
-            - Group description.
-        type: str
-    position:
-        required: false
-        description:
-            - Define the whole LDAP position of the group, e.g.
-              C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
-        type: str
-    ou:
-        required: false
-        description:
-            - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
-        type: str
-    subpath:
-        required: false
-        description:
-            - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
- type: str - default: "cn=groups" -''' - - -EXAMPLES = ''' -- name: Create a POSIX group - community.general.udm_group: - name: g123m-1A - -# Create a POSIX group with the exact DN -# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com) -- name: Create a POSIX group with a DN - community.general.udm_group: - name: g123m-1A - subpath: 'cn=classes,cn=students,cn=groups' - ou: school - -# or -- name: Create a POSIX group with a DN - community.general.udm_group: - name: g123m-1A - position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com' -''' - - -RETURN = '''# ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, -) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, - type='str'), - description=dict(default=None, - type='str'), - position=dict(default='', - type='str'), - ou=dict(default='', - type='str'), - subpath=dict(default='cn=groups', - type='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True - ) - name = module.params['name'] - description = module.params['description'] - position = module.params['position'] - ou = module.params['ou'] - subpath = module.params['subpath'] - state = module.params['state'] - changed = False - diff = None - - groups = list(ldap_search( - '(&(objectClass=posixGroup)(cn={0}))'.format(name), - attr=['cn'] - )) - if position != '': - container = position - else: - if ou != '': - ou = 'ou={0},'.format(ou) - if subpath != '': - subpath = '{0},'.format(subpath) - container = '{0}{1}{2}'.format(subpath, ou, base_dn()) - group_dn = 'cn={0},{1}'.format(name, container) - - exists = bool(len(groups)) - - if state == 'present': - try: - if not exists: - grp = umc_module_for_add('groups/group', container) - else: - grp = umc_module_for_edit('groups/group', group_dn) - grp['name'] = name - grp['description'] = description - diff = grp.diff() - changed = grp.diff() != [] - if not module.check_mode: - if not exists: - grp.create() - else: - grp.modify() - except Exception: - module.fail_json( - msg="Creating/editing group {0} in {1} failed".format(name, container) - ) - - if state == 'absent' and exists: - try: - grp = umc_module_for_edit('groups/group', group_dn) - if not module.check_mode: - grp.remove() - changed = True - except Exception: - module.fail_json( - msg="Removing group {0} failed".format(name) - ) - - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py deleted file mode 100644 index fb86d836..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py +++ /dev/null @@ -1,576 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: udm_share -author: -- Tobias Rüetschi (@keachi) -short_description: Manage samba shares on a univention corporate server 
-description: - - "This module allows to manage samba shares on a univention corporate - server (UCS). - It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 -options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the share is present or not. - type: str - name: - required: true - description: - - Name - type: str - host: - required: false - description: - - Host FQDN (server which provides the share), e.g. C({{ - ansible_fqdn }}). Required if C(state=present). - type: str - path: - required: false - description: - - Directory on the providing server, e.g. C(/home). Required if C(state=present). - type: path - sambaName: - required: false - description: - - Windows name. Required if C(state=present). - type: str - aliases: [ samba_name ] - ou: - required: true - description: - - Organisational unit, inside the LDAP Base DN. - type: str - owner: - default: '0' - description: - - Directory owner of the share's root directory. - type: str - group: - default: '0' - description: - - Directory owner group of the share's root directory. - type: str - directorymode: - default: '00755' - description: - - Permissions for the share's root directory. - type: str - root_squash: - default: true - description: - - Modify user ID for root user (root squashing). - type: bool - subtree_checking: - default: true - description: - - Subtree checking. - type: bool - sync: - default: 'sync' - description: - - NFS synchronisation. - type: str - writeable: - default: true - description: - - NFS write access. - type: bool - sambaBlockSize: - description: - - Blocking size. - type: str - aliases: [ samba_block_size ] - sambaBlockingLocks: - default: true - description: - - Blocking locks. - type: bool - aliases: [ samba_blocking_locks ] - sambaBrowseable: - description: - - Show in Windows network environment. - type: bool - default: True - aliases: [ samba_browsable ] - sambaCreateMode: - default: '0744' - description: - - File mode. - type: str - aliases: [ samba_create_mode ] - sambaCscPolicy: - default: 'manual' - description: - - Client-side caching policy. - type: str - aliases: [ samba_csc_policy ] - sambaCustomSettings: - default: [] - description: - - Option name in smb.conf and its value. - type: list - aliases: [ samba_custom_settings ] - sambaDirectoryMode: - default: '0755' - description: - - Directory mode. - type: str - aliases: [ samba_directory_mode ] - sambaDirectorySecurityMode: - default: '0777' - description: - - Directory security mode. - type: str - aliases: [ samba_directory_security_mode ] - sambaDosFilemode: - default: false - description: - - Users with write access may modify permissions. - type: bool - aliases: [ samba_dos_filemode ] - sambaFakeOplocks: - default: false - description: - - Fake oplocks. - type: bool - aliases: [ samba_fake_oplocks ] - sambaForceCreateMode: - default: false - description: - - Force file mode. - type: bool - aliases: [ samba_force_create_mode ] - sambaForceDirectoryMode: - default: false - description: - - Force directory mode. - type: bool - aliases: [ samba_force_directory_mode ] - sambaForceDirectorySecurityMode: - default: false - description: - - Force directory security mode. - type: bool - aliases: [ samba_force_directory_security_mode ] - sambaForceGroup: - description: - - Force group. - type: str - aliases: [ samba_force_group ] - sambaForceSecurityMode: - default: false - description: - - Force security mode. 
- type: bool - aliases: [ samba_force_security_mode ] - sambaForceUser: - description: - - Force user. - type: str - aliases: [ samba_force_user ] - sambaHideFiles: - description: - - Hide files. - type: str - aliases: [ samba_hide_files ] - sambaHideUnreadable: - default: false - description: - - Hide unreadable files/directories. - type: bool - aliases: [ samba_hide_unreadable ] - sambaHostsAllow: - default: [] - description: - - Allowed host/network. - type: list - aliases: [ samba_hosts_allow ] - sambaHostsDeny: - default: [] - description: - - Denied host/network. - type: list - aliases: [ samba_hosts_deny ] - sambaInheritAcls: - default: true - description: - - Inherit ACLs. - type: bool - aliases: [ samba_inherit_acls ] - sambaInheritOwner: - default: false - description: - - Create files/directories with the owner of the parent directory. - type: bool - aliases: [ samba_inherit_owner ] - sambaInheritPermissions: - default: false - description: - - Create files/directories with permissions of the parent directory. - type: bool - aliases: [ samba_inherit_permissions ] - sambaInvalidUsers: - description: - - Invalid users or groups. - type: str - aliases: [ samba_invalid_users ] - sambaLevel2Oplocks: - default: true - description: - - Level 2 oplocks. - type: bool - aliases: [ samba_level_2_oplocks ] - sambaLocking: - default: true - description: - - Locking. - type: bool - aliases: [ samba_locking ] - sambaMSDFSRoot: - default: false - description: - - MSDFS root. - type: bool - aliases: [ samba_msdfs_root ] - sambaNtAclSupport: - default: true - description: - - NT ACL support. - type: bool - aliases: [ samba_nt_acl_support ] - sambaOplocks: - default: true - description: - - Oplocks. - type: bool - aliases: [ samba_oplocks ] - sambaPostexec: - description: - - Postexec script. - type: str - aliases: [ samba_postexec ] - sambaPreexec: - description: - - Preexec script. - type: str - aliases: [ samba_preexec ] - sambaPublic: - default: false - description: - - Allow anonymous read-only access with a guest user. - type: bool - aliases: [ samba_public ] - sambaSecurityMode: - default: '0777' - description: - - Security mode. - type: str - aliases: [ samba_security_mode ] - sambaStrictLocking: - default: 'Auto' - description: - - Strict locking. - type: str - aliases: [ samba_strict_locking ] - sambaVFSObjects: - description: - - VFS objects. - type: str - aliases: [ samba_vfs_objects ] - sambaValidUsers: - description: - - Valid users or groups. - type: str - aliases: [ samba_valid_users ] - sambaWriteList: - description: - - Restrict write access to these users/groups. - type: str - aliases: [ samba_write_list ] - sambaWriteable: - default: true - description: - - Samba write access. - type: bool - aliases: [ samba_writeable ] - nfs_hosts: - default: [] - description: - - Only allow access for this host, IP address or network. - type: list - nfsCustomSettings: - default: [] - description: - - Option name in exports file. 
- type: list - aliases: [ nfs_custom_settings ] -''' - - -EXAMPLES = ''' -- name: Create a share named home on the server ucs.example.com with the path /home - community.general.udm_share: - name: home - path: /home - host: ucs.example.com - sambaName: Home -''' - - -RETURN = '''# ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, -) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, - type='str'), - ou=dict(required=True, - type='str'), - owner=dict(type='str', - default='0'), - group=dict(type='str', - default='0'), - path=dict(type='path', - default=None), - directorymode=dict(type='str', - default='00755'), - host=dict(type='str', - default=None), - root_squash=dict(type='bool', - default=True), - subtree_checking=dict(type='bool', - default=True), - sync=dict(type='str', - default='sync'), - writeable=dict(type='bool', - default=True), - sambaBlockSize=dict(type='str', - aliases=['samba_block_size'], - default=None), - sambaBlockingLocks=dict(type='bool', - aliases=['samba_blocking_locks'], - default=True), - sambaBrowseable=dict(type='bool', - aliases=['samba_browsable'], - default=True), - sambaCreateMode=dict(type='str', - aliases=['samba_create_mode'], - default='0744'), - sambaCscPolicy=dict(type='str', - aliases=['samba_csc_policy'], - default='manual'), - sambaCustomSettings=dict(type='list', - aliases=['samba_custom_settings'], - default=[]), - sambaDirectoryMode=dict(type='str', - aliases=['samba_directory_mode'], - default='0755'), - sambaDirectorySecurityMode=dict(type='str', - aliases=['samba_directory_security_mode'], - default='0777'), - sambaDosFilemode=dict(type='bool', - aliases=['samba_dos_filemode'], - default=False), - sambaFakeOplocks=dict(type='bool', - aliases=['samba_fake_oplocks'], - default=False), - sambaForceCreateMode=dict(type='bool', - aliases=['samba_force_create_mode'], - default=False), - sambaForceDirectoryMode=dict(type='bool', - aliases=['samba_force_directory_mode'], - default=False), - sambaForceDirectorySecurityMode=dict(type='bool', - aliases=['samba_force_directory_security_mode'], - default=False), - sambaForceGroup=dict(type='str', - aliases=['samba_force_group'], - default=None), - sambaForceSecurityMode=dict(type='bool', - aliases=['samba_force_security_mode'], - default=False), - sambaForceUser=dict(type='str', - aliases=['samba_force_user'], - default=None), - sambaHideFiles=dict(type='str', - aliases=['samba_hide_files'], - default=None), - sambaHideUnreadable=dict(type='bool', - aliases=['samba_hide_unreadable'], - default=False), - sambaHostsAllow=dict(type='list', - aliases=['samba_hosts_allow'], - default=[]), - sambaHostsDeny=dict(type='list', - aliases=['samba_hosts_deny'], - default=[]), - sambaInheritAcls=dict(type='bool', - aliases=['samba_inherit_acls'], - default=True), - sambaInheritOwner=dict(type='bool', - aliases=['samba_inherit_owner'], - default=False), - sambaInheritPermissions=dict(type='bool', - aliases=['samba_inherit_permissions'], - default=False), - sambaInvalidUsers=dict(type='str', - aliases=['samba_invalid_users'], - default=None), - sambaLevel2Oplocks=dict(type='bool', - aliases=['samba_level_2_oplocks'], - default=True), - sambaLocking=dict(type='bool', - aliases=['samba_locking'], - default=True), - sambaMSDFSRoot=dict(type='bool', - aliases=['samba_msdfs_root'], - default=False), - 
sambaName=dict(type='str', - aliases=['samba_name'], - default=None), - sambaNtAclSupport=dict(type='bool', - aliases=['samba_nt_acl_support'], - default=True), - sambaOplocks=dict(type='bool', - aliases=['samba_oplocks'], - default=True), - sambaPostexec=dict(type='str', - aliases=['samba_postexec'], - default=None), - sambaPreexec=dict(type='str', - aliases=['samba_preexec'], - default=None), - sambaPublic=dict(type='bool', - aliases=['samba_public'], - default=False), - sambaSecurityMode=dict(type='str', - aliases=['samba_security_mode'], - default='0777'), - sambaStrictLocking=dict(type='str', - aliases=['samba_strict_locking'], - default='Auto'), - sambaVFSObjects=dict(type='str', - aliases=['samba_vfs_objects'], - default=None), - sambaValidUsers=dict(type='str', - aliases=['samba_valid_users'], - default=None), - sambaWriteList=dict(type='str', - aliases=['samba_write_list'], - default=None), - sambaWriteable=dict(type='bool', - aliases=['samba_writeable'], - default=True), - nfs_hosts=dict(type='list', - default=[]), - nfsCustomSettings=dict(type='list', - aliases=['nfs_custom_settings'], - default=[]), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True, - required_if=([ - ('state', 'present', ['path', 'host', 'sambaName']) - ]) - ) - name = module.params['name'] - state = module.params['state'] - changed = False - diff = None - - obj = list(ldap_search( - '(&(objectClass=univentionShare)(cn={0}))'.format(name), - attr=['cn'] - )) - - exists = bool(len(obj)) - container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn()) - dn = 'cn={0},{1}'.format(name, container) - - if state == 'present': - try: - if not exists: - obj = umc_module_for_add('shares/share', container) - else: - obj = umc_module_for_edit('shares/share', dn) - - module.params['printablename'] = '{0} ({1})'.format(name, module.params['host']) - for k in obj.keys(): - if module.params[k] is True: - module.params[k] = '1' - elif module.params[k] is False: - module.params[k] = '0' - obj[k] = module.params[k] - - diff = obj.diff() - if exists: - for k in obj.keys(): - if obj.hasChanged(k): - changed = True - else: - changed = True - if not module.check_mode: - if not exists: - obj.create() - elif changed: - obj.modify() - except Exception as err: - module.fail_json( - msg='Creating/editing share {0} in {1} failed: {2}'.format( - name, - container, - err, - ) - ) - - if state == 'absent' and exists: - try: - obj = umc_module_for_edit('shares/share', dn) - if not module.check_mode: - obj.remove() - changed = True - except Exception as err: - module.fail_json( - msg='Removing share {0} in {1} failed: {2}'.format( - name, - container, - err, - ) - ) - - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py deleted file mode 100644 index b0d6138f..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py +++ /dev/null @@ -1,542 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adfinis SyGroup AG -# Tobias Rueetschi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: 
udm_user -author: -- Tobias Rüetschi (@keachi) -short_description: Manage posix users on a univention corporate server -description: - - "This module allows to manage posix users on a univention corporate - server (UCS). - It uses the python API of the UCS to create a new object or edit it." -requirements: - - Python >= 2.6 -options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the user is present or not. - type: str - username: - required: true - description: - - User name - aliases: ['name'] - type: str - firstname: - description: - - First name. Required if C(state=present). - type: str - lastname: - description: - - Last name. Required if C(state=present). - type: str - password: - description: - - Password. Required if C(state=present). - type: str - birthday: - description: - - Birthday - type: str - city: - description: - - City of users business address. - type: str - country: - description: - - Country of users business address. - type: str - department_number: - description: - - Department number of users business address. - aliases: [ departmentNumber ] - type: str - description: - description: - - Description (not gecos) - type: str - display_name: - description: - - Display name (not gecos) - aliases: [ displayName ] - type: str - email: - default: [''] - description: - - A list of e-mail addresses. - type: list - employee_number: - description: - - Employee number - aliases: [ employeeNumber ] - type: str - employee_type: - description: - - Employee type - aliases: [ employeeType ] - type: str - gecos: - description: - - GECOS - type: str - groups: - default: [] - description: - - "POSIX groups, the LDAP DNs of the groups will be found with the - LDAP filter for each group as $GROUP: - C((&(objectClass=posixGroup)(cn=$GROUP)))." - type: list - home_share: - description: - - "Home NFS share. Must be a LDAP DN, e.g. - C(cn=home,cn=shares,ou=school,dc=example,dc=com)." - aliases: [ homeShare ] - type: str - home_share_path: - description: - - Path to home NFS share, inside the homeShare. - aliases: [ homeSharePath ] - type: str - home_telephone_number: - default: [] - description: - - List of private telephone numbers. - aliases: [ homeTelephoneNumber ] - type: list - homedrive: - description: - - Windows home drive, e.g. C("H:"). - type: str - mail_alternative_address: - default: [] - description: - - List of alternative e-mail addresses. - aliases: [ mailAlternativeAddress ] - type: list - mail_home_server: - description: - - FQDN of mail server - aliases: [ mailHomeServer ] - type: str - mail_primary_address: - description: - - Primary e-mail address - aliases: [ mailPrimaryAddress ] - type: str - mobile_telephone_number: - default: [] - description: - - Mobile phone number - aliases: [ mobileTelephoneNumber ] - type: list - organisation: - description: - - Organisation - aliases: [ organization ] - type: str - overridePWHistory: - type: bool - default: 'no' - description: - - Override password history - aliases: [ override_pw_history ] - overridePWLength: - type: bool - default: 'no' - description: - - Override password check - aliases: [ override_pw_length ] - pager_telephonenumber: - default: [] - description: - - List of pager telephone numbers. - aliases: [ pagerTelephonenumber ] - type: list - phone: - description: - - List of telephone numbers. - type: list - postcode: - description: - - Postal code of users business address. - type: str - primary_group: - description: - - Primary group. This must be the group LDAP DN. 
- - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN). - aliases: [ primaryGroup ] - type: str - profilepath: - description: - - Windows profile directory - type: str - pwd_change_next_login: - choices: [ '0', '1' ] - description: - - Change password on next login. - aliases: [ pwdChangeNextLogin ] - type: str - room_number: - description: - - Room number of users business address. - aliases: [ roomNumber ] - type: str - samba_privileges: - description: - - "Samba privilege, like allow printer administration, do domain - join." - aliases: [ sambaPrivileges ] - type: list - samba_user_workstations: - description: - - Allow the authentication only on this Microsoft Windows host. - aliases: [ sambaUserWorkstations ] - type: list - sambahome: - description: - - Windows home path, e.g. C('\\$FQDN\$USERNAME'). - type: str - scriptpath: - description: - - Windows logon script. - type: str - secretary: - default: [] - description: - - A list of superiors as LDAP DNs. - type: list - serviceprovider: - default: [''] - description: - - Enable user for the following service providers. - type: list - shell: - default: '/bin/bash' - description: - - Login shell - type: str - street: - description: - - Street of users business address. - type: str - title: - description: - - Title, e.g. C(Prof.). - type: str - unixhome: - description: - - Unix home directory - - If not specified, it defaults to C(/home/$USERNAME). - type: str - userexpiry: - description: - - Account expiry date, e.g. C(1999-12-31). - - If not specified, it defaults to the current day plus one year. - type: str - position: - default: '' - description: - - "Define the whole position of users object inside the LDAP tree, - e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)." - type: str - update_password: - default: always - choices: [ always, on_create ] - description: - - "C(always) will update passwords if they differ. - C(on_create) will only set the password for newly created users." - type: str - ou: - default: '' - description: - - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for - LDAP OU C(ou=school,dc=example,dc=com)." - type: str - subpath: - default: 'cn=users' - description: - - "LDAP subpath inside the organizational unit, e.g. - C(cn=teachers,cn=users) for LDAP container - C(cn=teachers,cn=users,dc=example,dc=com)." 
- type: str -''' - - -EXAMPLES = ''' -- name: Create a user on a UCS - community.general.udm_user: - name: FooBar - password: secure_password - firstname: Foo - lastname: Bar - -- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com) - community.general.udm_user: - name: foo - password: secure_password - firstname: Foo - lastname: Bar - ou: school - subpath: 'cn=teachers,cn=users' - -# or define the position -- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com) - community.general.udm_user: - name: foo - password: secure_password - firstname: Foo - lastname: Bar - position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com' -''' - - -RETURN = '''# ''' - -import crypt -from datetime import date, timedelta - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.univention_umc import ( - umc_module_for_add, - umc_module_for_edit, - ldap_search, - base_dn, -) - - -def main(): - expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d") - module = AnsibleModule( - argument_spec=dict( - birthday=dict(type='str'), - city=dict(type='str'), - country=dict(type='str'), - department_number=dict(type='str', - aliases=['departmentNumber']), - description=dict(type='str'), - display_name=dict(type='str', - aliases=['displayName']), - email=dict(default=[''], - type='list'), - employee_number=dict(type='str', - aliases=['employeeNumber']), - employee_type=dict(type='str', - aliases=['employeeType']), - firstname=dict(type='str'), - gecos=dict(type='str'), - groups=dict(default=[], - type='list'), - home_share=dict(type='str', - aliases=['homeShare']), - home_share_path=dict(type='str', - aliases=['homeSharePath']), - home_telephone_number=dict(default=[], - type='list', - aliases=['homeTelephoneNumber']), - homedrive=dict(type='str'), - lastname=dict(type='str'), - mail_alternative_address=dict(default=[], - type='list', - aliases=['mailAlternativeAddress']), - mail_home_server=dict(type='str', - aliases=['mailHomeServer']), - mail_primary_address=dict(type='str', - aliases=['mailPrimaryAddress']), - mobile_telephone_number=dict(default=[], - type='list', - aliases=['mobileTelephoneNumber']), - organisation=dict(type='str', - aliases=['organization']), - overridePWHistory=dict(default=False, - type='bool', - aliases=['override_pw_history']), - overridePWLength=dict(default=False, - type='bool', - aliases=['override_pw_length']), - pager_telephonenumber=dict(default=[], - type='list', - aliases=['pagerTelephonenumber']), - password=dict(type='str', - no_log=True), - phone=dict(default=[], - type='list'), - postcode=dict(type='str'), - primary_group=dict(type='str', - aliases=['primaryGroup']), - profilepath=dict(type='str'), - pwd_change_next_login=dict(type='str', - choices=['0', '1'], - aliases=['pwdChangeNextLogin']), - room_number=dict(type='str', - aliases=['roomNumber']), - samba_privileges=dict(default=[], - type='list', - aliases=['sambaPrivileges']), - samba_user_workstations=dict(default=[], - type='list', - aliases=['sambaUserWorkstations']), - sambahome=dict(type='str'), - scriptpath=dict(type='str'), - secretary=dict(default=[], - type='list'), - serviceprovider=dict(default=[''], - type='list'), - shell=dict(default='/bin/bash', - type='str'), - street=dict(type='str'), - title=dict(type='str'), - unixhome=dict(type='str'), - userexpiry=dict(type='str'), - username=dict(required=True, - aliases=['name'], - 
type='str'), - position=dict(default='', - type='str'), - update_password=dict(default='always', - choices=['always', 'on_create'], - type='str'), - ou=dict(default='', - type='str'), - subpath=dict(default='cn=users', - type='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') - ), - supports_check_mode=True, - required_if=([ - ('state', 'present', ['firstname', 'lastname', 'password']) - ]) - ) - username = module.params['username'] - position = module.params['position'] - ou = module.params['ou'] - subpath = module.params['subpath'] - state = module.params['state'] - changed = False - diff = None - - users = list(ldap_search( - '(&(objectClass=posixAccount)(uid={0}))'.format(username), - attr=['uid'] - )) - if position != '': - container = position - else: - if ou != '': - ou = 'ou={0},'.format(ou) - if subpath != '': - subpath = '{0},'.format(subpath) - container = '{0}{1}{2}'.format(subpath, ou, base_dn()) - user_dn = 'uid={0},{1}'.format(username, container) - - exists = bool(len(users)) - - if state == 'present': - try: - if not exists: - obj = umc_module_for_add('users/user', container) - else: - obj = umc_module_for_edit('users/user', user_dn) - - if module.params['displayName'] is None: - module.params['displayName'] = '{0} {1}'.format( - module.params['firstname'], - module.params['lastname'] - ) - if module.params['unixhome'] is None: - module.params['unixhome'] = '/home/{0}'.format( - module.params['username'] - ) - for k in obj.keys(): - if (k != 'password' and - k != 'groups' and - k != 'overridePWHistory' and - k in module.params and - module.params[k] is not None): - obj[k] = module.params[k] - # handle some special values - obj['e-mail'] = module.params['email'] - if 'userexpiry' in obj and obj.get('userexpiry') is None: - obj['userexpiry'] = expiry - password = module.params['password'] - if obj['password'] is None: - obj['password'] = password - if module.params['update_password'] == 'always': - old_password = obj['password'].split('}', 2)[1] - if crypt.crypt(password, old_password) != old_password: - obj['overridePWHistory'] = module.params['overridePWHistory'] - obj['overridePWLength'] = module.params['overridePWLength'] - obj['password'] = password - - diff = obj.diff() - if exists: - for k in obj.keys(): - if obj.hasChanged(k): - changed = True - else: - changed = True - if not module.check_mode: - if not exists: - obj.create() - elif changed: - obj.modify() - except Exception: - module.fail_json( - msg="Creating/editing user {0} in {1} failed".format( - username, - container - ) - ) - try: - groups = module.params['groups'] - if groups: - filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format( - ')(cn='.join(groups) - ) - group_dns = list(ldap_search(filter, attr=['dn'])) - for dn in group_dns: - grp = umc_module_for_edit('groups/group', dn[0]) - if user_dn not in grp['users']: - grp['users'].append(user_dn) - if not module.check_mode: - grp.modify() - changed = True - except Exception: - module.fail_json( - msg="Adding groups to user {0} failed".format(username) - ) - - if state == 'absent' and exists: - try: - obj = umc_module_for_edit('users/user', user_dn) - if not module.check_mode: - obj.remove() - changed = True - except Exception: - module.fail_json( - msg="Removing user {0} failed".format(username) - ) - - module.exit_json( - changed=changed, - username=username, - diff=diff, - container=container - ) - - -if __name__ == '__main__': - main() diff --git 
a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py deleted file mode 100644 index 1839db38..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: -# * Andy Baker -# * Federico Tarantini -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create a Webfaction application using Ansible and the Webfaction API -# -# Valid application types can be found by looking here: -# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_app -short_description: Add or remove applications on a Webfaction host -description: - - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction). -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - name: - description: - - The name of the application - required: true - type: str - - state: - description: - - Whether the application should exist - choices: ['present', 'absent'] - default: "present" - type: str - - type: - description: - - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list. 
- required: true - type: str - - autostart: - description: - - Whether the app should restart with an C(autostart.cgi) script - type: bool - default: 'no' - - extra_info: - description: - - Any extra parameters required by the app - default: '' - type: str - - port_open: - description: - - If the port should be opened - type: bool - default: 'no' - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str - - machine: - description: - - The machine name to use (optional for accounts with only one machine) - type: str - -''' - -EXAMPLES = ''' - - name: Create a test app - community.general.webfaction_app: - name: "my_wsgi_app1" - state: present - type: mod_wsgi35-python27 - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - machine: "{{webfaction_machine}}" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - type=dict(required=True), - autostart=dict(required=False, type='bool', default=False), - extra_info=dict(required=False, default=""), - port_open=dict(required=False, type='bool', default=False), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - machine=dict(required=False, default=None), - ), - supports_check_mode=True - ) - app_name = module.params['name'] - app_type = module.params['type'] - app_state = module.params['state'] - - if module.params['machine']: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'], - module.params['machine'] - ) - else: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - app_list = webfaction.list_apps(session_id) - app_map = dict([(i['name'], i) for i in app_list]) - existing_app = app_map.get(app_name) - - result = {} - - # Here's where the real stuff happens - - if app_state == 'present': - - # Does an app with this name already exist? - if existing_app: - if existing_app['type'] != app_type: - module.fail_json(msg="App already exists with different type. Please fix by hand.") - - # If it exists with the right type, we don't change it - # Should check other parameters. - module.exit_json( - changed=False, - result=existing_app, - ) - - if not module.check_mode: - # If this isn't a dry run, create the app - result.update( - webfaction.create_app( - session_id, app_name, app_type, - module.boolean(module.params['autostart']), - module.params['extra_info'], - module.boolean(module.params['port_open']) - ) - ) - - elif app_state == 'absent': - - # If the app's already not there, nothing changed.
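        # The early exit below reports changed=False, so repeated runs with
        # state=absent stay idempotent.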
- if not existing_app: - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_app(session_id, app_name) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(app_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py deleted file mode 100644 index 11563426..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: -# * Andy Baker -# * Federico Tarantini -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create a webfaction database using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_db -short_description: Add or remove a database on Webfaction -description: - - Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. -options: - - name: - description: - - The name of the database - required: true - type: str - - state: - description: - - Whether the database should exist - choices: ['present', 'absent'] - default: "present" - type: str - - type: - description: - - The type of database to create. - required: true - choices: ['mysql', 'postgresql'] - type: str - - password: - description: - - The password for the new database user. - type: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str - - machine: - description: - - The machine name to use (optional for accounts with only one machine) - type: str -''' - -EXAMPLES = ''' - # This will also create a default DB user with the same - # name as the database, and the specified password. - - - name: Create a database - community.general.webfaction_db: - name: "{{webfaction_user}}_db1" - password: mytestsql - type: mysql - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - machine: "{{webfaction_machine}}" - - # Note that, for symmetry's sake, deleting a database using - # 'state: absent' will also delete the matching user. 
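  # A minimal sketch of the corresponding deletion task (a hypothetical
  # illustration, not one of the module's shipped examples; 'password' can
  # be omitted, while 'type' remains required):
  - name: Delete a database and its matching default user
    community.general.webfaction_db:
      name: "{{webfaction_user}}_db1"
      type: mysql
      state: absent
      login_name: "{{webfaction_user}}"
      login_password: "{{webfaction_passwd}}"
      machine: "{{webfaction_machine}}"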
- -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - type=dict(required=True, choices=['mysql', 'postgresql']), - password=dict(required=False, default=None, no_log=True), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - machine=dict(required=False, default=None), - ), - supports_check_mode=True - ) - db_name = module.params['name'] - db_state = module.params['state'] - db_type = module.params['type'] - db_passwd = module.params['password'] - - if module.params['machine']: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'], - module.params['machine'] - ) - else: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - db_list = webfaction.list_dbs(session_id) - db_map = dict([(i['name'], i) for i in db_list]) - existing_db = db_map.get(db_name) - - user_list = webfaction.list_db_users(session_id) - user_map = dict([(i['username'], i) for i in user_list]) - existing_user = user_map.get(db_name) - - result = {} - - # Here's where the real stuff happens - - if db_state == 'present': - - # Does a database with this name already exist? - if existing_db: - # Yes, but of a different type - fail - if existing_db['db_type'] != db_type: - module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") - - # If it exists with the right type, we don't change anything. - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, create the db - # and default user. - result.update( - webfaction.create_db( - session_id, db_name, db_type, db_passwd - ) - ) - - elif db_state == 'absent': - - # If this isn't a dry run...
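            # Both the database and the same-named default user are removed
            # below, which is the symmetry promised in the EXAMPLES comment.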
- if not module.check_mode: - - if not (existing_db or existing_user): - module.exit_json(changed=False,) - - if existing_db: - # Delete the db if it exists - result.update( - webfaction.delete_db(session_id, db_name, db_type) - ) - - if existing_user: - # Delete the default db user if it exists - result.update( - webfaction.delete_db_user(session_id, db_name, db_type) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(db_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py deleted file mode 100644 index f9c3b7db..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create Webfaction domains and subdomains using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_domain -short_description: Add or remove domains and subdomains on Webfaction -description: - - Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. - If you don't specify subdomains, the domain will be deleted. - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - - name: - description: - - The name of the domain - required: true - type: str - - state: - description: - - Whether the domain should exist - choices: ['present', 'absent'] - default: "present" - type: str - - subdomains: - description: - - Any subdomains to create. 
- default: [] - type: list - elements: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create a test domain - community.general.webfaction_domain: - name: mydomain.com - state: present - subdomains: - - www - - blog - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - - - name: Delete test domain and any subdomains - community.general.webfaction_domain: - name: mydomain.com - state: absent - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - subdomains=dict(default=[], type='list', elements='str'), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - domain_name = module.params['name'] - domain_state = module.params['state'] - domain_subdomains = module.params['subdomains'] - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - domain_list = webfaction.list_domains(session_id) - domain_map = dict([(i['domain'], i) for i in domain_list]) - existing_domain = domain_map.get(domain_name) - - result = {} - - # Here's where the real stuff happens - - if domain_state == 'present': - - # Does an app with this name already exist? - if existing_domain: - - if set(existing_domain['subdomains']) >= set(domain_subdomains): - # If it exists with the right subdomains, we don't change anything. - module.exit_json( - changed=False, - ) - - positional_args = [session_id, domain_name] + domain_subdomains - - if not module.check_mode: - # If this isn't a dry run, create the app - # print positional_args - result.update( - webfaction.create_domain( - *positional_args - ) - ) - - elif domain_state == 'absent': - - # If the app's already not there, nothing changed. 
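        # Per the module notes: when 'subdomains' is given, the delete call
        # below removes just those subdomains; with no subdomains given, the
        # whole domain is deleted.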
- if not existing_domain: - module.exit_json( - changed=False, - ) - - positional_args = [session_id, domain_name] + domain_subdomains - - if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_domain(*positional_args) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(domain_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py deleted file mode 100644 index 37755763..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create webfaction mailbox using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_mailbox -short_description: Add or remove mailboxes on Webfaction -description: - - Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. 
-options: - - mailbox_name: - description: - - The name of the mailbox - required: true - type: str - - mailbox_password: - description: - - The password for the mailbox - required: true - type: str - - state: - description: - - Whether the mailbox should exist - choices: ['present', 'absent'] - default: "present" - type: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create a mailbox - community.general.webfaction_mailbox: - mailbox_name="mybox" - mailbox_password="myboxpw" - state=present - login_name={{webfaction_user}} - login_password={{webfaction_passwd}} -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - mailbox_name=dict(required=True), - mailbox_password=dict(required=True, no_log=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - - mailbox_name = module.params['mailbox_name'] - site_state = module.params['state'] - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)] - existing_mailbox = mailbox_name in mailbox_list - - result = {} - - # Here's where the real stuff happens - - if site_state == 'present': - - # Does a mailbox with this name already exist? - if existing_mailbox: - module.exit_json(changed=False,) - - positional_args = [session_id, mailbox_name] - - if not module.check_mode: - # If this isn't a dry run, create the mailbox - result.update(webfaction.create_mailbox(*positional_args)) - - elif site_state == 'absent': - - # If the mailbox is already not there, nothing changed. - if not existing_mailbox: - module.exit_json(changed=False) - - if not module.check_mode: - # If this isn't a dry run, delete the mailbox - result.update(webfaction.delete_mailbox(session_id, mailbox_name)) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(site_state)) - - module.exit_json(changed=True, result=result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py deleted file mode 100644 index 87faade3..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create Webfaction website using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_site -short_description: Add or remove a website on a Webfaction host -description: - - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. 
-author: Quentin Stafford-Fraser (@quentinsf) -notes: - - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP - address. You can use a DNS name. - - If a site of the same name exists in the account but on a different host, the operation will exit. - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - - name: - description: - - The name of the website - required: true - type: str - - state: - description: - - Whether the website should exist - choices: ['present', 'absent'] - default: "present" - type: str - - host: - description: - - The webfaction host on which the site should be created. - required: true - type: str - - https: - description: - - Whether or not to use HTTPS - type: bool - default: 'no' - - site_apps: - description: - - A mapping of URLs to apps - default: [] - type: list - elements: list - - subdomains: - description: - - A list of subdomains associated with this site. - default: [] - type: list - elements: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create website - community.general.webfaction_site: - name: testsite1 - state: present - host: myhost.webfaction.com - subdomains: - - 'testsite1.my_domain.org' - site_apps: - - ['testapp1', '/'] - https: no - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" -''' - -import socket - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - # You can specify an IP address or hostname. - host=dict(required=True), - https=dict(required=False, type='bool', default=False), - subdomains=dict(type='list', elements='str', default=[]), - site_apps=dict(type='list', elements='list', default=[]), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - site_name = module.params['name'] - site_state = module.params['state'] - site_host = module.params['host'] - site_ip = socket.gethostbyname(site_host) - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - site_list = webfaction.list_websites(session_id) - site_map = dict([(i['name'], i) for i in site_list]) - existing_site = site_map.get(site_name) - - result = {} - - # Here's where the real stuff happens - - if site_state == 'present': - - # Does a site with this name already exist? - if existing_site: - - # If yes, but it's on a different IP address, then fail. - # If we wanted to allow relocation, we could add a 'relocate=true' option - # which would get the existing IP address, delete the site there, and create it - # at the new address. 
A bit dangerous, perhaps, so for now we'll require manual - # deletion if it's on another host. - - if existing_site['ip'] != site_ip: - module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.") - - # If it's on this host and the key parameters are the same, nothing needs to be done. - - if (existing_site['https'] == module.boolean(module.params['https'])) and \ - (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ - (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): - module.exit_json( - changed=False - ) - - positional_args = [ - session_id, site_name, site_ip, - module.boolean(module.params['https']), - module.params['subdomains'], - ] - for a in module.params['site_apps']: - positional_args.append((a[0], a[1])) - - if not module.check_mode: - # If this isn't a dry run, create or modify the site - result.update( - webfaction.create_website( - *positional_args - ) if not existing_site else webfaction.update_website( - *positional_args - ) - ) - - elif site_state == 'absent': - - # If the site's already not there, nothing changed. - if not existing_site: - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, delete the site - result.update( - webfaction.delete_website(session_id, site_name, site_ip) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(site_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py b/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py deleted file mode 100644 index b90b380c..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py +++ /dev/null @@ -1,2026 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xenserver_guest -short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to create new virtual machines from templates or other virtual machines, - modify various virtual machine components like network and disk, rename a virtual machine and - remove a virtual machine with associated components. -author: -- Bojan Vitnik (@bvitnik) -notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in I(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' 
-- 'To use C(https://) scheme for I(hostname) you have to either import host certificate to your OS certificate store or use I(validate_certs): C(no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -- 'Network configuration inside a guest OS, by using I(networks.type), I(networks.ip), I(networks.gateway) etc. parameters, is supported on - XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to - detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest - agent only supports None and Static types of network configuration, where None means DHCP configured interface, I(networks.type) and I(networks.type6) - values C(none) and C(dhcp) have the same effect. More info here: - U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)' -- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore - C(vm-data/networks/) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through - WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user - to implement boot time scripts or a custom agent that will read the parameters from xenstore and configure network with given parameters. - Take note that for xenstore data to become available inside a guest, a VM restart is needed hence module will require VM restart if any - parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most - useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here: - U(https://support.citrix.com/article/CTX226713)' -requirements: -- python >= 2.6 -- XenAPI -options: - state: - description: - - Specify the state VM should be in. - - If I(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters. - - If I(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters. - - If I(state) is set to C(absent) and VM exists, then VM is removed with its associated components. - - If I(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically. - type: str - default: present - choices: [ present, absent, poweredon ] - name: - description: - - Name of the VM to work with. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use I(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. - type: str - aliases: [ name_label ] - name_desc: - description: - - VM description. - type: str - uuid: - description: - - UUID of the VM to manage if known. This is XenServer's unique identifier. - - It is required if name is not unique. - - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally. - type: str - template: - description: - - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
- - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found. - - In case of multiple templates/VMs/snapshots with same name, use I(template_uuid) to uniquely specify source template. - - If VM already exists, this setting will be ignored. - - This parameter is case sensitive. - type: str - aliases: [ template_src ] - template_uuid: - description: - - UUID of a template, an existing VM or a snapshot that should be used to create VM. - - It is required if template name is not unique. - type: str - is_template: - description: - - Convert VM to template. - type: bool - default: no - folder: - description: - - Destination folder for VM. - - This parameter is case sensitive. - - 'Example:' - - ' folder: /folder1/folder2' - type: str - hardware: - description: - - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters. - type: dict - suboptions: - num_cpus: - description: - - Number of CPUs. - type: int - num_cpu_cores_per_socket: - description: - - Number of Cores Per Socket. I(num_cpus) has to be a multiple of I(num_cpu_cores_per_socket). - type: int - memory_mb: - description: - - Amount of memory in MB. - type: int - disks: - description: - - A list of disks to add to VM. - - All parameters are case sensitive. - - Removing or detaching existing disks of VM is not supported. - - New disks are required to have either a I(size) or one of I(size_[tb,gb,mb,kb,b]) parameters specified. - - VM needs to be shut down to reconfigure disk size. - type: list - elements: dict - aliases: [ disk ] - suboptions: - size: - description: - - 'Disk size with unit. Unit must be: C(b), C(kb), C(mb), C(gb), C(tb). VM needs to be shut down to reconfigure this parameter.' - - If no unit is specified, size is assumed to be in bytes. - type: str - size_b: - description: - - Disk size in bytes. - type: str - size_kb: - description: - - Disk size in kilobytes. - type: str - size_mb: - description: - - Disk size in megabytes. - type: str - size_gb: - description: - - Disk size in gigabytes. - type: str - size_tb: - description: - - Disk size in terabytes. - type: str - name: - description: - - Disk name. - type: str - aliases: [ name_label ] - name_desc: - description: - - Disk description. - type: str - sr: - description: - - Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR. - type: str - sr_uuid: - description: - - UUID of a SR to create disk on. Use if SR name is not unique. - type: str - cdrom: - description: - - A CD-ROM configuration for the VM. - - All parameters are case sensitive. - type: dict - suboptions: - type: - description: - - The type of CD-ROM. With C(none) the CD-ROM device will be present but empty. - type: str - choices: [ none, iso ] - iso_name: - description: - - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies I(type): C(iso)).' - - Required if I(type) is set to C(iso). - type: str - networks: - description: - - A list of networks (in the order of the NICs). - - All parameters are case sensitive. - - Name is required for new NICs. Other parameters are optional in all cases. - type: list - elements: dict - aliases: [ network ] - suboptions: - name: - description: - - Name of a XenServer network to attach the network interface to. - type: str - aliases: [ name_label ] - mac: - description: - - Customize MAC address of the interface. 
- type: str - type: - description: - - Type of IPv4 assignment. Value C(none) means whatever is default for OS. - - On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux). - type: str - choices: [ none, dhcp, static ] - ip: - description: - - 'Static IPv4 address (implies I(type): C(static)). Can include prefix in format C(/) instead of using C(netmask).' - type: str - netmask: - description: - - Static IPv4 netmask required for I(ip) if prefix is not specified. - type: str - gateway: - description: - - Static IPv4 gateway. - type: str - type6: - description: - - Type of IPv6 assignment. Value C(none) means whatever is default for OS. - type: str - choices: [ none, dhcp, static ] - ip6: - description: - - 'Static IPv6 address (implies I(type6): C(static)) with prefix in format C(/).' - type: str - gateway6: - description: - - Static IPv6 gateway. - type: str - home_server: - description: - - Name of a XenServer host that will be a Home Server for the VM. - - This parameter is case sensitive. - type: str - custom_params: - description: - - Define a list of custom VM params to set on VM. - - Useful for advanced users familiar with managing VM params through the xe CLI. - - A custom value object takes two fields I(key) and I(value) (see example below). - type: list - elements: dict - suboptions: - key: - description: - - VM param name. - type: str - required: yes - value: - description: - - VM param value. - type: raw - required: yes - wait_for_ip_address: - description: - - Wait until XenServer detects an IP address for the VM. If I(state) is set to C(absent), this parameter is ignored. - - This requires XenServer Tools to be preinstalled on the VM to work properly. - type: bool - default: no - state_change_timeout: - description: - - 'By default, module will wait indefinitely for VM to acquire an IP address if I(wait_for_ip_address): C(yes).' - - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change. - - In case of timeout, module will generate an error message. - type: int - default: 0 - linked_clone: - description: - - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy. - - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter. - type: bool - default: no - force: - description: - - Ignore warnings and complete the actions. - - This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down.
- type: bool - default: no -extends_documentation_fragment: -- community.general.xenserver.documentation - -''' - -EXAMPLES = r''' -- name: Create a VM from a template - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - validate_certs: no - folder: /testvms - name: testvm_2 - state: poweredon - template: CentOS 7 - disks: - - size_gb: 10 - sr: my_sr - hardware: - num_cpus: 6 - num_cpu_cores_per_socket: 3 - memory_mb: 512 - cdrom: - type: iso - iso_name: guest-tools.iso - networks: - - name: VM Network - mac: aa:bb:dd:aa:00:14 - wait_for_ip_address: yes - delegate_to: localhost - register: deploy - -- name: Create a VM template - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - validate_certs: no - folder: /testvms - name: testvm_6 - is_template: yes - disk: - - size_gb: 10 - sr: my_sr - hardware: - memory_mb: 512 - num_cpus: 1 - delegate_to: localhost - register: deploy - -- name: Rename a VM (requires the VM's UUID) - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - uuid: 421e4592-c069-924d-ce20-7e7533fab926 - name: new_name - state: present - delegate_to: localhost - -- name: Remove a VM by UUID - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - uuid: 421e4592-c069-924d-ce20-7e7533fab926 - state: absent - delegate_to: localhost - -- name: Modify custom params (boot order) - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_8 - state: present - custom_params: - - key: HVM_boot_params - value: { "order": "ndc" } - delegate_to: localhost - -- name: Customize network parameters - community.general.xenserver_guest: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_10 - networks: - - name: VM Network - ip: 192.168.1.100/24 - gateway: 192.168.1.1 - - type: dhcp - delegate_to: localhost -''' - -RETURN = r''' -instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "testvm_11-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "testvm_11-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "testvm_11", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - 
"other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } - } -changes: - description: Detected or made changes to VM - returned: always - type: list - sample: [ - { - "hardware": [ - "num_cpus" - ] - }, - { - "disks_changed": [ - [], - [ - "size" - ] - ] - }, - { - "disks_new": [ - { - "name": "new-disk", - "name_desc": "", - "position": 2, - "size_gb": "4", - "vbd_userdevice": "2" - } - ] - }, - { - "cdrom": [ - "type", - "iso_name" - ] - }, - { - "networks_changed": [ - [ - "mac" - ], - ] - }, - { - "networks_new": [ - { - "name": "Pool-wide network associated with eth2", - "position": 1, - "vif_device": "1" - } - ] - }, - "need_poweredoff" - ] -''' - -import re - -HAS_XENAPI = False -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.network import is_mac -from ansible.module_utils import six -from ansible_collections.community.general.plugins.module_utils.xenserver import ( - xenserver_common_argument_spec, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts, set_vm_power_state, - wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask, - is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix, - is_valid_ip6_addr, is_valid_ip6_prefix) - - -class XenServerVM(XenServerObject): - """Class for managing XenServer VM. - - Attributes: - vm_ref (str): XAPI reference to VM. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - """ - - def __init__(self, module): - """Inits XenServerVM using module parameters. - - Args: - module: Reference to Ansible module object. - """ - super(XenServerVM, self).__init__(module) - - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ") - self.gather_params() - - def exists(self): - """Returns True if VM exists, else False.""" - return True if self.vm_ref is not None else False - - def gather_params(self): - """Gathers all VM parameters available in XAPI database.""" - self.vm_params = gather_vm_params(self.module, self.vm_ref) - - def gather_facts(self): - """Gathers and returns VM facts.""" - return gather_vm_facts(self.module, self.vm_params) - - def set_power_state(self, power_state): - """Controls VM power state.""" - state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) - - # If state has changed, update vm_params. 
- if state_changed: - self.vm_params['power_state'] = current_state.capitalize() - - return state_changed - - def wait_for_ip_address(self): - """Waits for VM to acquire an IP address.""" - self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) - - def deploy(self): - """Deploys new VM from template.""" - # Safety check. - if self.exists(): - self.module.fail_json(msg="Called deploy on existing VM!") - - try: - templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True, - msg_prefix="VM deploy: ") - - # Is this an existing running VM? - if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted': - self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!") - - # Find a SR we can use for VM.copy(). We use SR of the first disk - # if specified or default SR if not specified. - disk_params_list = self.module.params['disks'] - - sr_ref = None - - if disk_params_list: - disk_params = disk_params_list[0] - - disk_sr_uuid = disk_params.get('sr_uuid') - disk_sr = disk_params.get('sr') - - if disk_sr_uuid is not None or disk_sr is not None: - sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, - msg_prefix="VM deploy disks[0]: ") - - if not sr_ref: - if self.default_sr_ref != "OpaqueRef:NULL": - sr_ref = self.default_sr_ref - else: - self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.") - - # VM name could be an empty string which is bad. - if self.module.params['name'] is not None and not self.module.params['name']: - self.module.fail_json(msg="VM deploy: VM name must not be an empty string!") - - # Support for Ansible check mode. - if self.module.check_mode: - return - - # Now we can instantiate VM. We use VM.clone for linked_clone and - # VM.copy for non linked_clone. - if self.module.params['linked_clone']: - self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name']) - else: - self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref) - - # Description is copied over from template so we reset it. - self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "") - - # If template is one of built-in XenServer templates, we have to - # do some additional steps. - # Note: VM.get_is_default_template() is supported from XenServer 7.2 - # onward so we use an alternative way. - templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref) - - if "default_template" in templ_other_config and templ_other_config['default_template']: - # other_config of built-in XenServer templates have a key called - # 'disks' with the following content: - # disks: - # This value of other_data is copied to cloned or copied VM and - # it prevents provisioning of VM because sr is not specified and - # XAPI returns an error. To get around this, we remove the - # 'disks' key and add disks to VM later ourselves. - vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref) - - if "disks" in vm_other_config: - del vm_other_config['disks'] - - self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config) - - # At this point we have VM ready for provisioning. - self.xapi_session.xenapi.VM.provision(self.vm_ref) - - # After provisioning we can prepare vm_params for reconfigure(). - self.gather_params() - - # VM is almost ready. 
We just need to reconfigure it... - self.reconfigure() - - # Power on VM if needed. - if self.module.params['state'] == "poweredon": - self.set_power_state("poweredon") - - except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) - - def reconfigure(self): - """Reconfigures an existing VM. - - Returns: - list: parameters that were reconfigured. - """ - # Safety check. - if not self.exists(): - self.module.fail_json(msg="Called reconfigure on non existing VM!") - - config_changes = self.get_changes() - - vm_power_state_save = self.vm_params['power_state'].lower() - - if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']: - self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!") - - # Support for Ansible check mode. - if self.module.check_mode: - return config_changes - - if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']: - self.set_power_state("shutdownguest") - - try: - for change in config_changes: - if isinstance(change, six.string_types): - if change == "name": - self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name']) - elif change == "name_desc": - self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc']) - elif change == "folder": - self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder') - - if self.module.params['folder']: - self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder']) - elif change == "home_server": - if self.module.params['home_server']: - host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0] - else: - host_ref = "OpaqueRef:NULL" - - self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref) - elif isinstance(change, dict): - if change.get('hardware'): - for hardware_change in change['hardware']: - if hardware_change == "num_cpus": - num_cpus = int(self.module.params['hardware']['num_cpus']) - - if num_cpus < int(self.vm_params['VCPUs_at_startup']): - self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus)) - self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus)) - else: - self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus)) - self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus)) - elif hardware_change == "num_cpu_cores_per_socket": - self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket') - num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket']) - - if num_cpu_cores_per_socket > 1: - self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket)) - elif hardware_change == "memory_mb": - memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576) - vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min']))) - - self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b) - elif change.get('disks_changed'): - vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] - position = 0 - - for disk_change_list in change['disks_changed']: - for disk_change in disk_change_list: - vdi_ref = 
self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid']) - - if disk_change == "name": - self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name']) - elif disk_change == "name_desc": - self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc']) - elif disk_change == "size": - self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position], - "VM reconfigure disks[%s]: " % position))) - - position += 1 - elif change.get('disks_new'): - for position, disk_userdevice in change['disks_new']: - disk_params = self.module.params['disks'][position] - - disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position) - disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else "" - - if disk_params.get('sr_uuid'): - sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid']) - elif disk_params.get('sr'): - sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0] - else: - sr_ref = self.default_sr_ref - - disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position)) - - new_disk_vdi = { - "name_label": disk_name, - "name_description": disk_name_desc, - "SR": sr_ref, - "virtual_size": disk_size, - "type": "user", - "sharable": False, - "read_only": False, - "other_config": {}, - } - - new_disk_vbd = { - "VM": self.vm_ref, - "VDI": None, - "userdevice": disk_userdevice, - "bootable": False, - "mode": "RW", - "type": "Disk", - "empty": False, - "other_config": {}, - "qos_algorithm_type": "", - "qos_algorithm_params": {}, - } - - new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi) - vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd) - - if self.vm_params['power_state'].lower() == "running": - self.xapi_session.xenapi.VBD.plug(vbd_ref_new) - - elif change.get('cdrom'): - vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"] - - # If there is no CD present, we have to create one. - if not vm_cdrom_params_list: - # We will try to place cdrom at userdevice position - # 3 (which is default) if it is not already occupied - # else we will place it at first allowed position. - cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref) - - if "3" in cdrom_userdevices_allowed: - cdrom_userdevice = "3" - else: - cdrom_userdevice = cdrom_userdevices_allowed[0] - - cdrom_vbd = { - "VM": self.vm_ref, - "VDI": "OpaqueRef:NULL", - "userdevice": cdrom_userdevice, - "bootable": False, - "mode": "RO", - "type": "CD", - "empty": True, - "other_config": {}, - "qos_algorithm_type": "", - "qos_algorithm_params": {}, - } - - cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd) - else: - cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid']) - - cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref) - - for cdrom_change in change['cdrom']: - if cdrom_change == "type": - cdrom_type = self.module.params['cdrom']['type'] - - if cdrom_type == "none" and not cdrom_is_empty: - self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) - elif cdrom_type == "host": - # Unimplemented! 
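                            # 'host' is not among the documented cdrom type
                            # choices ([none, iso]), so this branch is
                            # presumably a placeholder for host-drive
                            # passthrough.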
- pass - - elif cdrom_change == "iso_name": - if not cdrom_is_empty: - self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) - - cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0] - self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref) - elif change.get('networks_changed'): - position = 0 - - for network_change_list in change['networks_changed']: - if network_change_list: - vm_vif_params = self.vm_params['VIFs'][position] - network_params = self.module.params['networks'][position] - - vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid']) - network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid']) - - vif_recreated = False - - if "name" in network_change_list or "mac" in network_change_list: - # To change network or MAC, we destroy old - # VIF and then create a new one with changed - # parameters. That's how XenCenter does it. - - # Copy all old parameters to new VIF record. - vif = { - "device": vm_vif_params['device'], - "network": network_ref, - "VM": vm_vif_params['VM'], - "MAC": vm_vif_params['MAC'], - "MTU": vm_vif_params['MTU'], - "other_config": vm_vif_params['other_config'], - "qos_algorithm_type": vm_vif_params['qos_algorithm_type'], - "qos_algorithm_params": vm_vif_params['qos_algorithm_params'], - "locking_mode": vm_vif_params['locking_mode'], - "ipv4_allowed": vm_vif_params['ipv4_allowed'], - "ipv6_allowed": vm_vif_params['ipv6_allowed'], - } - - if "name" in network_change_list: - network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0] - vif['network'] = network_ref_new - vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new) - - if "mac" in network_change_list: - vif['MAC'] = network_params['mac'].lower() - - if self.vm_params['power_state'].lower() == "running": - self.xapi_session.xenapi.VIF.unplug(vif_ref) - - self.xapi_session.xenapi.VIF.destroy(vif_ref) - vif_ref_new = self.xapi_session.xenapi.VIF.create(vif) - - if self.vm_params['power_state'].lower() == "running": - self.xapi_session.xenapi.VIF.plug(vif_ref_new) - - vif_ref = vif_ref_new - vif_recreated = True - - if self.vm_params['customization_agent'] == "native": - vif_reconfigure_needed = False - - if "type" in network_change_list: - network_type = network_params['type'].capitalize() - vif_reconfigure_needed = True - else: - network_type = vm_vif_params['ipv4_configuration_mode'] - - if "ip" in network_change_list: - network_ip = network_params['ip'] - vif_reconfigure_needed = True - elif vm_vif_params['ipv4_addresses']: - network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0] - else: - network_ip = "" - - if "prefix" in network_change_list: - network_prefix = "/%s" % network_params['prefix'] - vif_reconfigure_needed = True - elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]: - network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1] - else: - network_prefix = "" - - if "gateway" in network_change_list: - network_gateway = network_params['gateway'] - vif_reconfigure_needed = True - else: - network_gateway = vm_vif_params['ipv4_gateway'] - - if vif_recreated or vif_reconfigure_needed: - self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type, - "%s%s" % (network_ip, network_prefix), network_gateway) - - vif_reconfigure_needed = False - - if "type6" in network_change_list: - network_type6 = network_params['type6'].capitalize() - vif_reconfigure_needed = True - else: - network_type6 = 
vm_vif_params['ipv6_configuration_mode'] - - if "ip6" in network_change_list: - network_ip6 = network_params['ip6'] - vif_reconfigure_needed = True - elif vm_vif_params['ipv6_addresses']: - network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0] - else: - network_ip6 = "" - - if "prefix6" in network_change_list: - network_prefix6 = "/%s" % network_params['prefix6'] - vif_reconfigure_needed = True - elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]: - network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1] - else: - network_prefix6 = "" - - if "gateway6" in network_change_list: - network_gateway6 = network_params['gateway6'] - vif_reconfigure_needed = True - else: - network_gateway6 = vm_vif_params['ipv6_gateway'] - - if vif_recreated or vif_reconfigure_needed: - self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6, - "%s%s" % (network_ip6, network_prefix6), network_gateway6) - - elif self.vm_params['customization_agent'] == "custom": - vif_device = vm_vif_params['device'] - - # A user could have manually changed network - # or mac e.g. trough XenCenter and then also - # make those changes in playbook manually. - # In that case, module will not detect any - # changes and info in xenstore_data will - # become stale. For that reason we always - # update name and mac in xenstore_data. - - # Since we handle name and mac differently, - # we have to remove them from - # network_change_list. - network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']] - - for network_change in network_change_list_tmp + ['name', 'mac']: - self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, - "vm-data/networks/%s/%s" % (vif_device, network_change)) - - if network_params.get('name'): - network_name = network_params['name'] - else: - network_name = vm_vif_params['network']['name_label'] - - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name) - - if network_params.get('mac'): - network_mac = network_params['mac'].lower() - else: - network_mac = vm_vif_params['MAC'].lower() - - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac) - - for network_change in network_change_list_tmp: - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/%s" % (vif_device, network_change), - network_params[network_change]) - - position += 1 - elif change.get('networks_new'): - for position, vif_device in change['networks_new']: - network_params = self.module.params['networks'][position] - - network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0] - - network_name = network_params['name'] - network_mac = network_params['mac'] if network_params.get('mac') else "" - network_type = network_params.get('type') - network_ip = network_params['ip'] if network_params.get('ip') else "" - network_prefix = network_params['prefix'] if network_params.get('prefix') else "" - network_netmask = network_params['netmask'] if network_params.get('netmask') else "" - network_gateway = network_params['gateway'] if network_params.get('gateway') else "" - network_type6 = network_params.get('type6') - network_ip6 = network_params['ip6'] if network_params.get('ip6') else "" - network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else "" - network_gateway6 = network_params['gateway6'] if 
network_params.get('gateway6') else "" - - vif = { - "device": vif_device, - "network": network_ref, - "VM": self.vm_ref, - "MAC": network_mac, - "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref), - "other_config": {}, - "qos_algorithm_type": "", - "qos_algorithm_params": {}, - } - - vif_ref_new = self.xapi_session.xenapi.VIF.create(vif) - - if self.vm_params['power_state'].lower() == "running": - self.xapi_session.xenapi.VIF.plug(vif_ref_new) - - if self.vm_params['customization_agent'] == "native": - if network_type and network_type == "static": - self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static", - "%s/%s" % (network_ip, network_prefix), network_gateway) - - if network_type6 and network_type6 == "static": - self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static", - "%s/%s" % (network_ip6, network_prefix6), network_gateway6) - elif self.vm_params['customization_agent'] == "custom": - # We first have to remove any existing data - # from xenstore_data because there could be - # some old leftover data from some interface - # that once occupied same device location as - # our new interface. - for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']: - self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param)) - - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name) - - # We get MAC from VIF itself instead of - # networks.mac because it could be - # autogenerated. - vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac) - - if network_type: - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type) - - if network_type == "static": - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/ip" % vif_device, network_ip) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/prefix" % vif_device, network_prefix) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/netmask" % vif_device, network_netmask) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/gateway" % vif_device, network_gateway) - - if network_type6: - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6) - - if network_type6 == "static": - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/ip6" % vif_device, network_ip6) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/prefix6" % vif_device, network_prefix6) - self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, - "vm-data/networks/%s/gateway6" % vif_device, network_gateway6) - - elif change.get('custom_params'): - for position in change['custom_params']: - custom_param_key = self.module.params['custom_params'][position]['key'] - custom_param_value = self.module.params['custom_params'][position]['value'] - self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value)) - - if self.module.params['is_template']: - self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True) - elif "need_poweredoff" in config_changes and self.module.params['force'] and 
vm_power_state_save != 'halted':
-                self.set_power_state("poweredon")
-
-            # Gather new params after reconfiguration.
-            self.gather_params()
-
-        except XenAPI.Failure as f:
-            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
-        return config_changes
-
-    def destroy(self):
-        """Removes an existing VM with associated disks."""
-        # Safety check.
-        if not self.exists():
-            self.module.fail_json(msg="Called destroy on a non-existing VM!")
-
-        if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
-            self.module.fail_json(msg="VM destroy: VM has to be in the powered-off state to be destroyed, but force was not specified!")
-
-        # Support for Ansible check mode.
-        if self.module.check_mode:
-            return
-
-        # Make sure that VM is powered off before we destroy it.
-        self.set_power_state("poweredoff")
-
-        try:
-            # Destroy VM!
-            self.xapi_session.xenapi.VM.destroy(self.vm_ref)
-
-            vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
-
-            # Destroy all VDIs associated with the VM!
-            for vm_disk_params in vm_disk_params_list:
-                vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
-
-                self.xapi_session.xenapi.VDI.destroy(vdi_ref)
-
-        except XenAPI.Failure as f:
-            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
-    def get_changes(self):
-        """Finds VM parameters that differ from specified ones.
-
-        This method builds a list (with nested dictionaries) of VM
-        parameters that differ from those specified in module parameters.
-
-        Returns:
-            list: VM parameters that differ from those specified in
-            module parameters.
-        """
-        # Safety check.
-        if not self.exists():
-            self.module.fail_json(msg="Called get_changes on a non-existing VM!")
-
-        need_poweredoff = False
-
-        if self.module.params['is_template']:
-            need_poweredoff = True
-
-        try:
-            # This VM could be a template or a snapshot. In that case we fail
-            # because we can't reconfigure them or it would just be too
-            # dangerous.
-            if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
-                self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
-
-            if self.vm_params['is_a_snapshot']:
-                self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
-
-            # Let's build a list of parameters that changed.
-            config_changes = []
-
-            # Name could only differ if we found an existing VM by uuid.
-            if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
-                if self.module.params['name']:
-                    config_changes.append('name')
-                else:
-                    self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")
-
-            if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
-                config_changes.append('name_desc')
-
-            # Folder parameter is found in other_config.
-            vm_other_config = self.vm_params['other_config']
-            vm_folder = vm_other_config.get('folder', '')
-
-            if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
-                config_changes.append('folder')
-
-            if self.module.params['home_server'] is not None:
-                if (self.module.params['home_server'] and
-                        (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):
-
-                    # Check existence only. Ignore return value.
-                    get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
-                                   msg_prefix="VM check home_server: ")
-
-                    config_changes.append('home_server')
-                elif not self.module.params['home_server'] and self.vm_params['affinity']:
-                    config_changes.append('home_server')
-
-            config_changes_hardware = []
-
-            if self.module.params['hardware']:
-                num_cpus = self.module.params['hardware'].get('num_cpus')
-
-                if num_cpus is not None:
-                    # Kept for compatibility with older Ansible versions that
-                    # do not support subargument specs.
-                    try:
-                        num_cpus = int(num_cpus)
-                    except ValueError:
-                        self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")
-
-                    if num_cpus < 1:
-                        self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")
-
-                    # We can use the VCPUs_at_startup or VCPUs_max parameter. I'd
-                    # say the former is the way to go but this needs
-                    # confirmation and testing.
-                    if num_cpus != int(self.vm_params['VCPUs_at_startup']):
-                        config_changes_hardware.append('num_cpus')
-                        # For now, we don't support hotplugging so the VM has to
-                        # be in the powered-off state to reconfigure.
-                        need_poweredoff = True
-
-                num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
-
-                if num_cpu_cores_per_socket is not None:
-                    # Kept for compatibility with older Ansible versions that
-                    # do not support subargument specs.
-                    try:
-                        num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
-                    except ValueError:
-                        self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
-
-                    if num_cpu_cores_per_socket < 1:
-                        self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
-
-                    if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
-                        self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
-
-                    vm_platform = self.vm_params['platform']
-                    vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
-
-                    if num_cpu_cores_per_socket != vm_cores_per_socket:
-                        config_changes_hardware.append('num_cpu_cores_per_socket')
-                        # For now, we don't support hotplugging so the VM has to
-                        # be in the powered-off state to reconfigure.
-                        need_poweredoff = True
-
-                memory_mb = self.module.params['hardware'].get('memory_mb')
-
-                if memory_mb is not None:
-                    # Kept for compatibility with older Ansible versions that
-                    # do not support subargument specs.
-                    try:
-                        memory_mb = int(memory_mb)
-                    except ValueError:
-                        self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
-
-                    if memory_mb < 1:
-                        self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
-
-                    # There are multiple memory parameters:
-                    #     - memory_dynamic_max
-                    #     - memory_dynamic_min
-                    #     - memory_static_max
-                    #     - memory_static_min
-                    #     - memory_target
-                    #
-                    # memory_target seems like a good candidate but it returns 0 for
-                    # halted VMs so we can't use it.
-                    #
-                    # I decided to use memory_dynamic_max and memory_static_max
-                    # and use whichever is larger. This strategy needs validation
-                    # and testing.
-                    #
-                    # XenServer stores memory size in bytes so we need to divide
-                    # it by 1024*1024 = 1048576.
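As a quick sanity check of the arithmetic in the comment above, a minimal self-contained sketch; bytes_to_mib is a hypothetical helper, not part of the module:

def bytes_to_mib(size_b):
    # XenServer reports memory in bytes; 1048576 == 1024 * 1024.
    return int(size_b / 1048576)

# An 8192 MB VM reports a memory_static_max of 8589934592 bytes:
assert bytes_to_mib(8589934592) == 8192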
- if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576): - config_changes_hardware.append('memory_mb') - # For now, we don't support hotpluging so VM has to be in - # poweredoff state to reconfigure. - need_poweredoff = True - - if config_changes_hardware: - config_changes.append({"hardware": config_changes_hardware}) - - config_changes_disks = [] - config_new_disks = [] - - # Find allowed userdevices. - vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref) - - if self.module.params['disks']: - # Get the list of all disk. Filter out any CDs found. - vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] - - # Number of disks defined in module params have to be same or - # higher than a number of existing disks attached to the VM. - # We don't support removal or detachment of disks. - if len(self.module.params['disks']) < len(vm_disk_params_list): - self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" % - (len(self.module.params['disks']), len(vm_disk_params_list))) - - # Find the highest disk occupied userdevice. - if not vm_disk_params_list: - vm_disk_userdevice_highest = "-1" - else: - vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice'] - - for position in range(len(self.module.params['disks'])): - if position < len(vm_disk_params_list): - vm_disk_params = vm_disk_params_list[position] - else: - vm_disk_params = None - - disk_params = self.module.params['disks'][position] - - disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position) - - disk_name = disk_params.get('name') - - if disk_name is not None and not disk_name: - self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position) - - # If this is an existing disk. - if vm_disk_params and vm_disk_params['VDI']: - disk_changes = [] - - if disk_name and disk_name != vm_disk_params['VDI']['name_label']: - disk_changes.append('name') - - disk_name_desc = disk_params.get('name_desc') - - if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']: - disk_changes.append('name_desc') - - if disk_size: - if disk_size > int(vm_disk_params['VDI']['virtual_size']): - disk_changes.append('size') - need_poweredoff = True - elif disk_size < int(vm_disk_params['VDI']['virtual_size']): - self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). " - "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size'])) - - config_changes_disks.append(disk_changes) - # If this is a new disk. - else: - if not disk_size: - self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position) - - disk_sr_uuid = disk_params.get('sr_uuid') - disk_sr = disk_params.get('sr') - - if disk_sr_uuid is not None or disk_sr is not None: - # Check existance only. Ignore return value. - get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, - msg_prefix="VM check disks[%s]: " % position) - elif self.default_sr_ref == 'OpaqueRef:NULL': - self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position) - - if not vbd_userdevices_allowed: - self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" 
% position) - - disk_userdevice = None - - # We need to place a new disk right above the highest - # placed existing disk to maintain relative disk - # positions pairable with disk specifications in - # module params. That place must not be occupied by - # some other device like CD-ROM. - for userdevice in vbd_userdevices_allowed: - if int(userdevice) > int(vm_disk_userdevice_highest): - disk_userdevice = userdevice - vbd_userdevices_allowed.remove(userdevice) - vm_disk_userdevice_highest = userdevice - break - - # If no place was found. - if disk_userdevice is None: - # Highest occupied place could be a CD-ROM device - # so we have to include all devices regardless of - # type when calculating out-of-bound position. - disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1) - self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice)) - - # For new disks we only track their position. - config_new_disks.append((position, disk_userdevice)) - - # We should append config_changes_disks to config_changes only - # if there is at least one changed disk, else skip. - for disk_change in config_changes_disks: - if disk_change: - config_changes.append({"disks_changed": config_changes_disks}) - break - - if config_new_disks: - config_changes.append({"disks_new": config_new_disks}) - - config_changes_cdrom = [] - - if self.module.params['cdrom']: - # Get the list of all CD-ROMs. Filter out any regular disks - # found. If we found no existing CD-ROM, we will create it - # later else take the first one found. - vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"] - - # If no existing CD-ROM is found, we will need to add one. - # We need to check if there is any userdevice allowed. - if not vm_cdrom_params_list and not vbd_userdevices_allowed: - self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!") - - cdrom_type = self.module.params['cdrom'].get('type') - cdrom_iso_name = self.module.params['cdrom'].get('iso_name') - - # If cdrom.iso_name is specified but cdrom.type is not, - # then set cdrom.type to 'iso', unless cdrom.iso_name is - # an empty string, in that case set cdrom.type to 'none'. - if not cdrom_type: - if cdrom_iso_name: - cdrom_type = "iso" - elif cdrom_iso_name is not None: - cdrom_type = "none" - - self.module.params['cdrom']['type'] = cdrom_type - - # If type changed. - if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])): - config_changes_cdrom.append('type') - - if cdrom_type == "iso": - # Check if ISO exists. - # Check existance only. Ignore return value. - get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True, - msg_prefix="VM check cdrom.iso_name: ") - - # Is ISO image changed? - if (cdrom_iso_name and - (not vm_cdrom_params_list or - not vm_cdrom_params_list[0]['VDI'] or - cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])): - config_changes_cdrom.append('iso_name') - - if config_changes_cdrom: - config_changes.append({"cdrom": config_changes_cdrom}) - - config_changes_networks = [] - config_new_networks = [] - - # Find allowed devices. - vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref) - - if self.module.params['networks']: - # Number of VIFs defined in module params have to be same or - # higher than a number of existing VIFs attached to the VM. - # We don't support removal of VIFs. 
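Before the network checks continue below, the disk placement loop above is compact enough to model standalone. A simplified sketch, assuming userdevices are numeric strings; next_free_userdevice is a hypothetical name and it skips the bookkeeping the real loop does on the allowed list:

def next_free_userdevice(allowed, highest_occupied):
    # Return the first allowed slot above the highest occupied one; None
    # means there is no valid slot above it.
    for userdevice in allowed:
        if int(userdevice) > int(highest_occupied):
            return userdevice
    return None

# With "0" and "1" taken and "3" reserved by another device such as a
# CD-ROM, the next disk lands on "2":
assert next_free_userdevice(["2", "4", "5"], "1") == "2"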
- if len(self.module.params['networks']) < len(self.vm_params['VIFs']): - self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" % - (len(self.module.params['networks']), len(self.vm_params['VIFs']))) - - # Find the highest occupied device. - if not self.vm_params['VIFs']: - vif_device_highest = "-1" - else: - vif_device_highest = self.vm_params['VIFs'][-1]['device'] - - for position in range(len(self.module.params['networks'])): - if position < len(self.vm_params['VIFs']): - vm_vif_params = self.vm_params['VIFs'][position] - else: - vm_vif_params = None - - network_params = self.module.params['networks'][position] - - network_name = network_params.get('name') - - if network_name is not None and not network_name: - self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position) - - if network_name: - # Check existance only. Ignore return value. - get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True, - msg_prefix="VM check networks[%s]: " % position) - - network_mac = network_params.get('mac') - - if network_mac is not None: - network_mac = network_mac.lower() - - if not is_mac(network_mac): - self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac)) - - # IPv4 reconfiguration. - network_type = network_params.get('type') - network_ip = network_params.get('ip') - network_netmask = network_params.get('netmask') - network_prefix = None - - # If networks.ip is specified and networks.type is not, - # then set networks.type to 'static'. - if not network_type and network_ip: - network_type = "static" - - # XenServer natively supports only 'none' and 'static' - # type with 'none' being the same as 'dhcp'. - if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp": - network_type = "none" - - if network_type and network_type == "static": - if network_ip is not None: - network_ip_split = network_ip.split('/') - network_ip = network_ip_split[0] - - if network_ip and not is_valid_ip_addr(network_ip): - self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip)) - - if len(network_ip_split) > 1: - network_prefix = network_ip_split[1] - - if not is_valid_ip_prefix(network_prefix): - self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix)) - - if network_netmask is not None: - if not is_valid_ip_netmask(network_netmask): - self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask)) - - network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True) - elif network_prefix is not None: - network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True) - - # If any parameter is overridden at this point, update it. - if network_type: - network_params['type'] = network_type - - if network_ip: - network_params['ip'] = network_ip - - if network_netmask: - network_params['netmask'] = network_netmask - - if network_prefix: - network_params['prefix'] = network_prefix - - network_gateway = network_params.get('gateway') - - # Gateway can be an empty string (when removing gateway - # configuration) but if it is not, it should be validated. 
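The netmask/prefix normalization above goes through the ip_netmask_to_prefix and ip_prefix_to_netmask helpers; their effect can be approximated with the standard library. A Python 3 sketch, not the module's actual implementation:

import ipaddress

def netmask_to_prefix(netmask):
    return ipaddress.IPv4Network("0.0.0.0/%s" % netmask).prefixlen

def prefix_to_netmask(prefix):
    return str(ipaddress.IPv4Network("0.0.0.0/%s" % prefix).netmask)

assert netmask_to_prefix("255.255.255.128") == 25
assert prefix_to_netmask("25") == "255.255.255.128"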
- if network_gateway and not is_valid_ip_addr(network_gateway): - self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway)) - - # IPv6 reconfiguration. - network_type6 = network_params.get('type6') - network_ip6 = network_params.get('ip6') - network_prefix6 = None - - # If networks.ip6 is specified and networks.type6 is not, - # then set networks.type6 to 'static'. - if not network_type6 and network_ip6: - network_type6 = "static" - - # XenServer natively supports only 'none' and 'static' - # type with 'none' being the same as 'dhcp'. - if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp": - network_type6 = "none" - - if network_type6 and network_type6 == "static": - if network_ip6 is not None: - network_ip6_split = network_ip6.split('/') - network_ip6 = network_ip6_split[0] - - if network_ip6 and not is_valid_ip6_addr(network_ip6): - self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6)) - - if len(network_ip6_split) > 1: - network_prefix6 = network_ip6_split[1] - - if not is_valid_ip6_prefix(network_prefix6): - self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6)) - - # If any parameter is overridden at this point, update it. - if network_type6: - network_params['type6'] = network_type6 - - if network_ip6: - network_params['ip6'] = network_ip6 - - if network_prefix6: - network_params['prefix6'] = network_prefix6 - - network_gateway6 = network_params.get('gateway6') - - # Gateway can be an empty string (when removing gateway - # configuration) but if it is not, it should be validated. - if network_gateway6 and not is_valid_ip6_addr(network_gateway6): - self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6)) - - # If this is an existing VIF. 
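Several comparisons in the change detection below split XAPI's "address/prefix" strings into their two halves; a minimal sketch of that pattern (split_cidr is a hypothetical name):

def split_cidr(addresses):
    # XAPI reports addresses as "ip/prefix"; an empty list or entry maps to
    # empty strings, mirroring how missing values are treated here.
    if addresses and addresses[0]:
        ip, prefix = addresses[0].split('/')
        return ip, prefix
    return "", ""

assert split_cidr(["192.168.0.200/25"]) == ("192.168.0.200", "25")
assert split_cidr([]) == ("", "")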
- if vm_vif_params and vm_vif_params['network']: - network_changes = [] - - if network_name and network_name != vm_vif_params['network']['name_label']: - network_changes.append('name') - - if network_mac and network_mac != vm_vif_params['MAC'].lower(): - network_changes.append('mac') - - if self.vm_params['customization_agent'] == "native": - if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower(): - network_changes.append('type') - - if network_type and network_type == "static": - if network_ip and (not vm_vif_params['ipv4_addresses'] or - not vm_vif_params['ipv4_addresses'][0] or - network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]): - network_changes.append('ip') - - if network_prefix and (not vm_vif_params['ipv4_addresses'] or - not vm_vif_params['ipv4_addresses'][0] or - network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]): - network_changes.append('prefix') - network_changes.append('netmask') - - if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']: - network_changes.append('gateway') - - if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower(): - network_changes.append('type6') - - if network_type6 and network_type6 == "static": - if network_ip6 and (not vm_vif_params['ipv6_addresses'] or - not vm_vif_params['ipv6_addresses'][0] or - network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]): - network_changes.append('ip6') - - if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or - not vm_vif_params['ipv6_addresses'][0] or - network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]): - network_changes.append('prefix6') - - if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']: - network_changes.append('gateway6') - - elif self.vm_params['customization_agent'] == "custom": - vm_xenstore_data = self.vm_params['xenstore_data'] - - if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"): - network_changes.append('type') - need_poweredoff = True - - if network_type and network_type == "static": - if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""): - network_changes.append('ip') - need_poweredoff = True - - if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""): - network_changes.append('prefix') - network_changes.append('netmask') - need_poweredoff = True - - if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' % - vm_vif_params['device'], ""): - network_changes.append('gateway') - need_poweredoff = True - - if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"): - network_changes.append('type6') - need_poweredoff = True - - if network_type6 and network_type6 == "static": - if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""): - network_changes.append('ip6') - need_poweredoff = True - - if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""): - network_changes.append('prefix6') - need_poweredoff = True - - if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' % - vm_vif_params['device'], ""): - network_changes.append('gateway6') - 
need_poweredoff = True - - config_changes_networks.append(network_changes) - # If this is a new VIF. - else: - if not network_name: - self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position) - - if network_type and network_type == "static" and network_ip and not network_netmask: - self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position) - - if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6: - self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position) - - # Restart is needed if we are adding new network - # interface with IP/gateway parameters specified - # and custom agent is used. - if self.vm_params['customization_agent'] == "custom": - for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']: - if network_params.get(parameter): - need_poweredoff = True - break - - if not vif_devices_allowed: - self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position) - - # We need to place a new network interface right above the - # highest placed existing interface to maintain relative - # positions pairable with network interface specifications - # in module params. - vif_device = str(int(vif_device_highest) + 1) - - if vif_device not in vif_devices_allowed: - self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device)) - - vif_devices_allowed.remove(vif_device) - vif_device_highest = vif_device - - # For new VIFs we only track their position. - config_new_networks.append((position, vif_device)) - - # We should append config_changes_networks to config_changes only - # if there is at least one changed network, else skip. - for network_change in config_changes_networks: - if network_change: - config_changes.append({"networks_changed": config_changes_networks}) - break - - if config_new_networks: - config_changes.append({"networks_new": config_new_networks}) - - config_changes_custom_params = [] - - if self.module.params['custom_params']: - for position in range(len(self.module.params['custom_params'])): - custom_param = self.module.params['custom_params'][position] - - custom_param_key = custom_param['key'] - custom_param_value = custom_param['value'] - - if custom_param_key not in self.vm_params: - self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key)) - - if custom_param_value != self.vm_params[custom_param_key]: - # We only need to track custom param position. - config_changes_custom_params.append(position) - - if config_changes_custom_params: - config_changes.append({"custom_params": config_changes_custom_params}) - - if need_poweredoff: - config_changes.append('need_poweredoff') - - return config_changes - - except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) - - def get_normalized_disk_size(self, disk_params, msg_prefix=""): - """Parses disk size parameters and returns disk size in bytes. - - This method tries to parse disk size module parameters. It fails - with an error message if size cannot be parsed. - - Args: - disk_params (dist): A dictionary with disk parameters. - msg_prefix (str): A string error messages should be prefixed - with (default: ""). 
- - Returns: - int: disk size in bytes if disk size is successfully parsed or - None if no disk size parameters were found. - """ - # There should be only single size spec but we make a list of all size - # specs just in case. Priority is given to 'size' but if not found, we - # check for 'size_tb', 'size_gb', 'size_mb' etc. and use first one - # found. - disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')] - - if disk_size_spec: - try: - # size - if "size" in disk_size_spec: - size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)') - disk_size_m = size_regex.match(disk_params['size']) - - if disk_size_m: - size = disk_size_m.group(1) - unit = disk_size_m.group(2) - else: - raise ValueError - # size_tb, size_gb, size_mb, size_kb, size_b - else: - size = disk_params[disk_size_spec[0]] - unit = disk_size_spec[0].split('_')[-1] - - if not unit: - unit = "b" - else: - unit = unit.lower() - - if re.match(r'\d+\.\d+', size): - # We found float value in string, let's typecast it. - if unit == "b": - # If we found float but unit is bytes, we get the integer part only. - size = int(float(size)) - else: - size = float(size) - else: - # We found int value in string, let's typecast it. - size = int(size) - - if not size or size < 0: - raise ValueError - - except (TypeError, ValueError, NameError): - # Common failure - self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix) - - disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0) - - if unit in disk_units: - return int(size * (1024 ** disk_units[unit])) - else: - self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." % - (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key])))) - else: - return None - - @staticmethod - def get_cdrom_type(vm_cdrom_params): - """Returns VM CD-ROM type.""" - # TODO: implement support for detecting type host. No server to test - # this on at the moment. 
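Stepping back to get_normalized_disk_size: its disk_units table maps each suffix to a power of 1024, so the final conversion reduces to the following sketch (illustrative only; to_bytes is a hypothetical name):

DISK_UNITS = dict(tb=4, gb=3, mb=2, kb=1, b=0)

def to_bytes(size, unit="b"):
    # e.g. 10 "gb" -> 10 * 1024**3 bytes.
    return int(size * (1024 ** DISK_UNITS[unit.lower()]))

assert to_bytes(10, "gb") == 10737418240
assert to_bytes(512, "mb") == 536870912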
- if vm_cdrom_params['empty']: - return "none" - else: - return "iso" - - -def main(): - argument_spec = xenserver_common_argument_spec() - argument_spec.update( - state=dict(type='str', default='present', - choices=['present', 'absent', 'poweredon']), - name=dict(type='str', aliases=['name_label']), - name_desc=dict(type='str'), - uuid=dict(type='str'), - template=dict(type='str', aliases=['template_src']), - template_uuid=dict(type='str'), - is_template=dict(type='bool', default=False), - folder=dict(type='str'), - hardware=dict( - type='dict', - options=dict( - num_cpus=dict(type='int'), - num_cpu_cores_per_socket=dict(type='int'), - memory_mb=dict(type='int'), - ), - ), - disks=dict( - type='list', - elements='dict', - options=dict( - size=dict(type='str'), - size_tb=dict(type='str'), - size_gb=dict(type='str'), - size_mb=dict(type='str'), - size_kb=dict(type='str'), - size_b=dict(type='str'), - name=dict(type='str', aliases=['name_label']), - name_desc=dict(type='str'), - sr=dict(type='str'), - sr_uuid=dict(type='str'), - ), - aliases=['disk'], - mutually_exclusive=[ - ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'], - ['sr', 'sr_uuid'], - ], - ), - cdrom=dict( - type='dict', - options=dict( - type=dict(type='str', choices=['none', 'iso']), - iso_name=dict(type='str'), - ), - required_if=[ - ['type', 'iso', ['iso_name']], - ], - ), - networks=dict( - type='list', - elements='dict', - options=dict( - name=dict(type='str', aliases=['name_label']), - mac=dict(type='str'), - type=dict(type='str', choices=['none', 'dhcp', 'static']), - ip=dict(type='str'), - netmask=dict(type='str'), - gateway=dict(type='str'), - type6=dict(type='str', choices=['none', 'dhcp', 'static']), - ip6=dict(type='str'), - gateway6=dict(type='str'), - ), - aliases=['network'], - required_if=[ - ['type', 'static', ['ip']], - ['type6', 'static', ['ip6']], - ], - ), - home_server=dict(type='str'), - custom_params=dict( - type='list', - elements='dict', - options=dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='raw', required=True), - ), - ), - wait_for_ip_address=dict(type='bool', default=False), - state_change_timeout=dict(type='int', default=0), - linked_clone=dict(type='bool', default=False), - force=dict(type='bool', default=False), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - mutually_exclusive=[ - ['template', 'template_uuid'], - ], - ) - - result = {'failed': False, 'changed': False} - - vm = XenServerVM(module) - - # Find existing VM - if vm.exists(): - if module.params['state'] == "absent": - vm.destroy() - result['changed'] = True - elif module.params['state'] == "present": - config_changes = vm.reconfigure() - - if config_changes: - result['changed'] = True - - # Make new disk and network changes more user friendly - # and informative. 
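The loop that follows reshapes the internal (position, userdevice) tuples into user-facing dictionaries; a standalone sketch of that reshaping, with made-up sample values:

disks_new = [(0, "2")]
disk_params = {"size_gb": "10", "name": "data", "sr": None}

report = []
for position, userdevice in disks_new:
    # Track position and placement, then copy every explicitly set parameter.
    entry = {"position": position, "vbd_userdevice": userdevice}
    entry.update((k, v) for k, v in disk_params.items() if v is not None)
    report.append(entry)

assert report == [{"position": 0, "vbd_userdevice": "2", "size_gb": "10", "name": "data"}]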
- for change in config_changes: - if isinstance(change, dict): - if change.get('disks_new'): - disks_new = [] - - for position, userdevice in change['disks_new']: - disk_new_params = {"position": position, "vbd_userdevice": userdevice} - disk_params = module.params['disks'][position] - - for k in disk_params.keys(): - if disk_params[k] is not None: - disk_new_params[k] = disk_params[k] - - disks_new.append(disk_new_params) - - if disks_new: - change['disks_new'] = disks_new - - elif change.get('networks_new'): - networks_new = [] - - for position, device in change['networks_new']: - network_new_params = {"position": position, "vif_device": device} - network_params = module.params['networks'][position] - - for k in network_params.keys(): - if network_params[k] is not None: - network_new_params[k] = network_params[k] - - networks_new.append(network_new_params) - - if networks_new: - change['networks_new'] = networks_new - - result['changes'] = config_changes - - elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]: - result['changed'] = vm.set_power_state(module.params['state']) - elif module.params['state'] != "absent": - vm.deploy() - result['changed'] = True - - if module.params['wait_for_ip_address'] and module.params['state'] != "absent": - vm.wait_for_ip_address() - - result['instance'] = vm.gather_facts() - - if result['failed']: - module.fail_json(**result) - else: - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py b/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py deleted file mode 100644 index a2e77725..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xenserver_guest_info -short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to gather essential VM facts. -author: -- Bojan Vitnik (@bvitnik) -notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' 
-- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change. -requirements: -- python >= 2.6 -- XenAPI -options: - name: - description: - - Name of the VM to gather facts from. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. - type: str - aliases: [ name_label ] - uuid: - description: - - UUID of the VM to gather fact of. This is XenServer's unique identifier. - - It is required if name is not unique. - type: str -extends_documentation_fragment: -- community.general.xenserver.documentation - -''' - -EXAMPLES = r''' -- name: Gather facts - community.general.xenserver_guest_info: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_11 - delegate_to: localhost - register: facts -''' - -RETURN = r''' -instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "testvm_11-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "testvm_11-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "testvm_11", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } - } -''' - -HAS_XENAPI = False -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts) - - -class XenServerVM(XenServerObject): - """Class for 
managing XenServer VM. - - Attributes: - vm_ref (str): XAPI reference to VM. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - """ - - def __init__(self, module): - """Inits XenServerVM using module parameters. - - Args: - module: Reference to AnsibleModule object. - """ - super(XenServerVM, self).__init__(module) - - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") - self.gather_params() - - def gather_params(self): - """Gathers all VM parameters available in XAPI database.""" - self.vm_params = gather_vm_params(self.module, self.vm_ref) - - def gather_facts(self): - """Gathers and returns VM facts.""" - return gather_vm_facts(self.module, self.vm_params) - - -def main(): - argument_spec = xenserver_common_argument_spec() - argument_spec.update( - name=dict(type='str', aliases=['name_label']), - uuid=dict(type='str'), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - ) - - result = {'failed': False, 'changed': False} - - # Module will exit with an error message if no VM is found. - vm = XenServerVM(module) - - # Gather facts. - result['instance'] = vm.gather_facts() - - if result['failed']: - module.fail_json(**result) - else: - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py b/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py deleted file mode 100644 index 4a195ff5..00000000 --- a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xenserver_guest_powerstate -short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown guest OS of virtual machine. -author: -- Bojan Vitnik (@bvitnik) -notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' 
-- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -requirements: -- python >= 2.6 -- XenAPI -options: - state: - description: - - Specify the state VM should be in. - - If C(state) is set to value other than C(present), then VM is transitioned into required state and facts are returned. - - If C(state) is set to C(present), then VM is just checked for existence and facts are returned. - type: str - default: present - choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ] - name: - description: - - Name of the VM to manage. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. - type: str - aliases: [ name_label ] - uuid: - description: - - UUID of the VM to manage if known. This is XenServer's unique identifier. - - It is required if name is not unique. - type: str - wait_for_ip_address: - description: - - Wait until XenServer detects an IP address for the VM. - - This requires XenServer Tools to be preinstalled on the VM to work properly. - type: bool - default: no - state_change_timeout: - description: - - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: yes).' - - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. - - In case of timeout, module will generate an error message. - type: int - default: 0 -extends_documentation_fragment: -- community.general.xenserver.documentation - -''' - -EXAMPLES = r''' -- name: Power on VM - community.general.xenserver_guest_powerstate: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_11 - state: powered-on - delegate_to: localhost - register: facts -''' - -RETURN = r''' -instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "windows-template-testing-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "windows-template-testing-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "windows-template-testing", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": 
"OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } - } -''' - -import re - -HAS_XENAPI = False -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts, set_vm_power_state, - wait_for_vm_ip_address) - - -class XenServerVM(XenServerObject): - """Class for managing XenServer VM. - - Attributes: - vm_ref (str): XAPI reference to VM. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - """ - - def __init__(self, module): - """Inits XenServerVM using module parameters. - - Args: - module: Reference to Ansible module object. - """ - super(XenServerVM, self).__init__(module) - - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") - self.gather_params() - - def gather_params(self): - """Gathers all VM parameters available in XAPI database.""" - self.vm_params = gather_vm_params(self.module, self.vm_ref) - - def gather_facts(self): - """Gathers and returns VM facts.""" - return gather_vm_facts(self.module, self.vm_params) - - def set_power_state(self, power_state): - """Controls VM power state.""" - state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) - - # If state has changed, update vm_params. - if state_changed: - self.vm_params['power_state'] = current_state.capitalize() - - return state_changed - - def wait_for_ip_address(self): - """Waits for VM to acquire an IP address.""" - self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) - - -def main(): - argument_spec = xenserver_common_argument_spec() - argument_spec.update( - state=dict(type='str', default='present', - choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']), - name=dict(type='str', aliases=['name_label']), - uuid=dict(type='str'), - wait_for_ip_address=dict(type='bool', default=False), - state_change_timeout=dict(type='int', default=0), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - ) - - result = {'failed': False, 'changed': False} - - # Module will exit with an error message if no VM is found. - vm = XenServerVM(module) - - # Set VM power state. 
-    if module.params['state'] != "present":
-        result['changed'] = vm.set_power_state(module.params['state'])
-
-    if module.params['wait_for_ip_address']:
-        vm.wait_for_ip_address()
-
-    result['instance'] = vm.gather_facts()
-
-    if result['failed']:
-        module.fail_json(**result)
-    else:
-        module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py b/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
deleted file mode 120000
index bbdeea16..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/misc/cloud_init_data_facts.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
deleted file mode 120000
index bc779a4e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
+++ /dev/null
@@ -1 +0,0 @@
-net_tools/cloudflare_dns.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py b/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py
deleted file mode 100644
index 9dc1a771..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py
+++ /dev/null
@@ -1,606 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, Steve Gargan
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: consul
-short_description: "Add, modify & delete services within a consul cluster."
-description:
-  - Registers services and checks for an agent with a consul cluster.
-    A service is some process running on the agent node that should be advertised by
-    consul's discovery mechanism. It may optionally supply a check definition,
-    a periodic service test to notify the consul cluster of the service's health.
-  - "Checks may also be registered per node, e.g. disk usage or cpu usage, and
-    notify the health of the entire node to the cluster.
-    Service level checks do not require a check name or id as these are derived
-    by Consul from the Service name and id respectively by appending 'service:'.
-    Node level checks require a I(check_name) and optionally a I(check_id)."
-  - Currently, there is no complete way to retrieve the script, interval or ttl
-    metadata for a registered check. Without this metadata it is not possible to
-    tell if the data supplied with ansible represents a change to a check. As a
-    result this module does not attempt to determine changes and will always report
-    that a change occurred. An API method is planned to supply this metadata so at
-    that stage change management will be added.
-  - "See U(http://consul.io) for more details."
-requirements:
-  - python-consul
-  - requests
-author: "Steve Gargan (@sgargan)"
-options:
-    state:
-        type: str
-        description:
-          - Register or deregister the consul service, defaults to C(present).
-        default: present
-        choices: ['present', 'absent']
-    service_name:
-        type: str
-        description:
-          - Unique name for the service on a node, must be unique per node;
            required if registering a service. May be omitted if registering
-            a node level check.
-    service_id:
-        type: str
-        description:
-          - The ID for the service, must be unique per node. If I(state=absent),
-            defaults to the service name if supplied.
-    host:
-        type: str
-        description:
-          - Host of the consul agent, defaults to C(localhost).
-        default: localhost
-    port:
-        type: int
-        description:
-          - The port on which the consul agent is running.
-        default: 8500
-    scheme:
-        type: str
-        description:
-          - The protocol scheme on which the consul agent is running.
-        default: http
-    validate_certs:
-        description:
-          - Whether to verify the TLS certificate of the consul agent.
-        type: bool
-        default: 'yes'
-    notes:
-        type: str
-        description:
-          - Notes to attach to check when registering it.
-    service_port:
-        type: int
-        description:
-          - The port on which the service is listening. Can optionally be supplied for
-            registration of a service, i.e. if I(service_name) or I(service_id) is set.
-    service_address:
-        type: str
-        description:
-          - The address to advertise that the service will be listening on.
-            This value will be passed as the I(address) parameter to Consul's
-            C(/v1/agent/service/register) API method, so refer to the Consul API
-            documentation for further details.
-    tags:
-        type: list
-        elements: str
-        description:
-          - Tags that will be attached to the service registration.
-    script:
-        type: str
-        description:
-          - The script/command that will be run periodically to check the health
-            of the service. Scripts require I(interval) and vice versa.
-    interval:
-        type: str
-        description:
-          - The interval at which the service check will be run. This is a number
-            with a C(s) or C(m) suffix to signify the units of seconds or minutes,
-            e.g. C(15s) or C(1m). If no suffix is supplied, C(m) will be used by
-            default, e.g. C(1) will be C(1m). Required if the I(script) parameter
-            is specified.
-    check_id:
-        type: str
-        description:
-          - An ID for the service check. If I(state=absent), defaults to
-            I(check_name). Ignored if part of a service definition.
-    check_name:
-        type: str
-        description:
-          - A name for the service check. Required if standalone, ignored if
-            part of a service definition.
-    ttl:
-        type: str
-        description:
-          - Checks can be registered with a ttl instead of a I(script) and I(interval).
-            This means that the service will check in with the agent before the
-            ttl expires. If it doesn't, the check will be considered failed.
-            Required if registering a check when I(script) and I(interval) are missing.
-            Similar to the interval, this is a number with a C(s) or C(m) suffix to
-            signify the units of seconds or minutes, e.g. C(15s) or C(1m). If no
-            suffix is supplied, C(m) will be used by default, e.g. C(1) will be C(1m).
-    tcp:
-        type: str
-        description:
-          - Checks can be registered with a TCP port. This means that consul
-            will check if the connection attempt to that port is successful (that is,
-            the port is currently accepting connections).
-            The format is C(host:port), for example C(localhost:80).
-            I(interval) must also be provided with this option.
-        version_added: '1.3.0'
-    http:
-        type: str
-        description:
-          - Checks can be registered with an HTTP endpoint. This means that consul
-            will check that the http endpoint returns a successful HTTP status.
-            I(interval) must also be provided with this option.
-    timeout:
-        type: str
-        description:
-          - A custom HTTP check timeout. The consul default is 10 seconds.
-            Similar to the interval, this is a number with a C(s) or C(m) suffix to
-            signify the units of seconds or minutes, e.g. C(15s) or C(1m).
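The interval, ttl and timeout options above share one duration format; a sketch of a parser for it (an editor's illustration in Python, not code from this module):

import re

def parse_duration(value):
    # Accept "<number>" with an optional "s" or "m" suffix; a bare number
    # defaults to minutes, as the option descriptions state.
    m = re.match(r'^(\d+)([sm]?)$', str(value))
    if not m:
        raise ValueError("unsupported duration: %r" % value)
    number, unit = m.groups()
    return "%s%s" % (number, unit or "m")

assert parse_duration("15s") == "15s"
assert parse_duration(1) == "1m"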
- token: - type: str - description: - - the token key identifying an ACL rule set. May be required to register services. -''' - -EXAMPLES = ''' -- name: Register nginx service with the local consul agent - community.general.consul: - service_name: nginx - service_port: 80 - -- name: Register nginx service with curl check - community.general.consul: - service_name: nginx - service_port: 80 - script: curl http://localhost - interval: 60s - -- name: register nginx with a tcp check - community.general.consul: - service_name: nginx - service_port: 80 - interval: 60s - tcp: localhost:80 - -- name: Register nginx with an http check - community.general.consul: - service_name: nginx - service_port: 80 - interval: 60s - http: http://localhost:80/status - -- name: Register external service nginx available at 10.1.5.23 - community.general.consul: - service_name: nginx - service_port: 80 - service_address: 10.1.5.23 - -- name: Register nginx with some service tags - community.general.consul: - service_name: nginx - service_port: 80 - tags: - - prod - - webservers - -- name: Remove nginx service - community.general.consul: - service_name: nginx - state: absent - -- name: Register celery worker service - community.general.consul: - service_name: celery-worker - tags: - - prod - - worker - -- name: Create a node level check to test disk usage - community.general.consul: - check_name: Disk usage - check_id: disk_usage - script: /opt/disk_usage.py - interval: 5m - -- name: Register an http check against a service that's already registered - community.general.consul: - check_name: nginx-check2 - check_id: nginx-check2 - service_id: nginx - interval: 60s - http: http://localhost:80/morestatus -''' - -try: - import consul - from requests.exceptions import ConnectionError - - class PatchedConsulAgentService(consul.Consul.Agent.Service): - def deregister(self, service_id, token=None): - params = {} - if token: - params['token'] = token - return self.agent.http.put(consul.base.CB.bool(), - '/v1/agent/service/deregister/%s' % service_id, - params=params) - - python_consul_installed = True -except ImportError: - python_consul_installed = False - -import re -from ansible.module_utils.basic import AnsibleModule - - -def register_with_consul(module): - state = module.params.get('state') - - if state == 'present': - add(module) - else: - remove(module) - - -def add(module): - ''' adds a service or a check depending on supplied configuration''' - check = parse_check(module) - service = parse_service(module) - - if not service and not check: - module.fail_json(msg='a name and port are required to register a service') - - if service: - if check: - service.add_check(check) - add_service(module, service) - elif check: - add_check(module, check) - - -def remove(module): - ''' removes a service or a check ''' - service_id = module.params.get('service_id') or module.params.get('service_name') - check_id = module.params.get('check_id') or module.params.get('check_name') - if not (service_id or check_id): - module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name') - if service_id: - remove_service(module, service_id) - else: - remove_check(module, check_id) - - -def add_check(module, check): - ''' registers a check with the given agent. currently there is no way - retrieve the full metadata of an existing check through the consul api. - Without this we can't compare to the supplied check and so we must assume - a change. 
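For reference, the registration path in this module reduces to a couple of python-consul calls; a minimal sketch, assuming a local agent on 127.0.0.1:8500 and an illustrative nginx service:

import consul

c = consul.Consul(host='127.0.0.1', port=8500)
# Build an HTTP check; interval/timeout are Consul duration strings such as '60s'.
check = consul.Check.http('http://localhost:80/status', interval='60s', timeout='10s')
# Register the service together with its check, as the module's add_service() path does.
c.agent.service.register('nginx', service_id='nginx', port=80, tags=['prod'], check=check)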
''' - if not check.name and not check.service_id: - module.fail_json(msg='a check name is required for a node level check, one not attached to a service') - - consul_api = get_consul_api(module) - check.register(consul_api) - - module.exit_json(changed=True, - check_id=check.check_id, - check_name=check.name, - script=check.script, - interval=check.interval, - ttl=check.ttl, - tcp=check.tcp, - http=check.http, - timeout=check.timeout, - service_id=check.service_id) - - -def remove_check(module, check_id): - ''' removes a check using its id ''' - consul_api = get_consul_api(module) - - if check_id in consul_api.agent.checks(): - consul_api.agent.check.deregister(check_id) - module.exit_json(changed=True, id=check_id) - - module.exit_json(changed=False, id=check_id) - - -def add_service(module, service): - ''' registers a service with the current agent ''' - result = service - changed = False - - consul_api = get_consul_api(module) - existing = get_service_by_id_or_name(consul_api, service.id) - - # there is no way to retrieve the details of checks so if a check is present - # in the service it must be re-registered - if service.has_checks() or not existing or not existing == service: - - service.register(consul_api) - # check that it registered correctly - registered = get_service_by_id_or_name(consul_api, service.id) - if registered: - result = registered - changed = True - - module.exit_json(changed=changed, - service_id=result.id, - service_name=result.name, - service_port=result.port, - checks=[check.to_dict() for check in service.checks], - tags=result.tags) - - -def remove_service(module, service_id): - ''' deregister a service from the given agent using its service id ''' - consul_api = get_consul_api(module) - service = get_service_by_id_or_name(consul_api, service_id) - if service: - consul_api.agent.service.deregister(service_id, token=module.params.get('token')) - module.exit_json(changed=True, id=service_id) - - module.exit_json(changed=False, id=service_id) - - -def get_consul_api(module): - consulClient = consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs'), - token=module.params.get('token')) - consulClient.agent.service = PatchedConsulAgentService(consulClient) - return consulClient - - -def get_service_by_id_or_name(consul_api, service_id_or_name): - ''' iterate the registered services and find one with the given id ''' - for name, service in consul_api.agent.services().items(): - if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name: - return ConsulService(loaded=service) - - -def parse_check(module): - if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('tcp'), module.params.get('http')) if p]) > 1: - module.fail_json( - msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense') - - if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('tcp') or module.params.get('http'): - - return ConsulCheck( - module.params.get('check_id'), - module.params.get('check_name'), - module.params.get('check_node'), - module.params.get('check_host'), - module.params.get('script'), - module.params.get('interval'), - module.params.get('ttl'), - module.params.get('notes'), - module.params.get('tcp'), - module.params.get('http'), - module.params.get('timeout'), - module.params.get('service_id'), - ) - - -def 
parse_service(module): - if module.params.get('service_name'): - return ConsulService( - module.params.get('service_id'), - module.params.get('service_name'), - module.params.get('service_address'), - module.params.get('service_port'), - module.params.get('tags'), - ) - elif not module.params.get('service_name'): - module.fail_json(msg="service_name is required to configure a service.") - - -class ConsulService(object): - - def __init__(self, service_id=None, name=None, address=None, port=-1, - tags=None, loaded=None): - self.id = self.name = name - if service_id: - self.id = service_id - self.address = address - self.port = port - self.tags = tags - self.checks = [] - if loaded: - self.id = loaded['ID'] - self.name = loaded['Service'] - self.port = loaded['Port'] - self.tags = loaded['Tags'] - - def register(self, consul_api): - optional = {} - - if self.port: - optional['port'] = self.port - - if len(self.checks) > 0: - optional['check'] = self.checks[0].check - - consul_api.agent.service.register( - self.name, - service_id=self.id, - address=self.address, - tags=self.tags, - **optional) - - def add_check(self, check): - self.checks.append(check) - - def checks(self): - return self.checks - - def has_checks(self): - return len(self.checks) > 0 - - def __eq__(self, other): - return (isinstance(other, self.__class__) and - self.id == other.id and - self.name == other.name and - self.port == other.port and - self.tags == other.tags) - - def __ne__(self, other): - return not self.__eq__(other) - - def to_dict(self): - data = {'id': self.id, "name": self.name} - if self.port: - data['port'] = self.port - if self.tags and len(self.tags) > 0: - data['tags'] = self.tags - if len(self.checks) > 0: - data['check'] = self.checks[0].to_dict() - return data - - -class ConsulCheck(object): - - def __init__(self, check_id, name, node=None, host='localhost', - script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None): - self.check_id = self.name = name - if check_id: - self.check_id = check_id - self.service_id = service_id - self.notes = notes - self.node = node - self.host = host - - self.interval = self.validate_duration('interval', interval) - self.ttl = self.validate_duration('ttl', ttl) - self.script = script - self.tcp = tcp - self.http = http - self.timeout = self.validate_duration('timeout', timeout) - - self.check = None - - if script: - self.check = consul.Check.script(script, self.interval) - - if ttl: - self.check = consul.Check.ttl(self.ttl) - - if http: - if interval is None: - raise Exception('http check must specify interval') - - self.check = consul.Check.http(http, self.interval, self.timeout) - - if tcp: - if interval is None: - raise Exception('tcp check must specify interval') - - regex = r"(?P.*)(?::)(?P(?:[0-9]+))$" - match = re.match(regex, tcp) - - if match is None: - raise Exception('tcp check must be in host:port format') - - self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval) - - def validate_duration(self, name, duration): - if duration: - duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] - if not any((duration.endswith(suffix) for suffix in duration_units)): - duration = "{0}s".format(duration) - return duration - - def register(self, consul_api): - consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id, - notes=self.notes, - check=self.check) - - def __eq__(self, other): - return (isinstance(other, self.__class__) and - self.check_id == 
other.check_id and - self.service_id == other.service_id and - self.name == other.name and - self.script == other.script and - self.interval == other.interval) - - def __ne__(self, other): - return not self.__eq__(other) - - def to_dict(self): - data = {} - self._add(data, 'id', attr='check_id') - self._add(data, 'name', attr='check_name') - self._add(data, 'script') - self._add(data, 'node') - self._add(data, 'notes') - self._add(data, 'host') - self._add(data, 'interval') - self._add(data, 'ttl') - self._add(data, 'tcp') - self._add(data, 'http') - self._add(data, 'timeout') - self._add(data, 'service_id') - return data - - def _add(self, data, key, attr=None): - try: - if attr is None: - attr = key - data[key] = getattr(self, attr) - except Exception: - pass - - -def test_dependencies(module): - if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(default='localhost'), - port=dict(default=8500, type='int'), - scheme=dict(default='http'), - validate_certs=dict(default=True, type='bool'), - check_id=dict(), - check_name=dict(), - check_node=dict(), - check_host=dict(), - notes=dict(), - script=dict(), - service_id=dict(), - service_name=dict(), - service_address=dict(type='str'), - service_port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - interval=dict(type='str'), - ttl=dict(type='str'), - tcp=dict(type='str'), - http=dict(type='str'), - timeout=dict(type='str'), - tags=dict(type='list', elements='str'), - token=dict(no_log=True) - ), - supports_check_mode=False, - ) - - test_dependencies(module) - - try: - register_with_consul(module) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), str(e))) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py b/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py deleted file mode 100644 index 1e01e58a..00000000 --- a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: consul_acl -short_description: Manipulate Consul ACL keys and rules -description: - - Allows the addition, modification and deletion of ACL keys and associated - rules in a consul cluster via the agent. For more details on using and - configuring ACLs, see https://www.consul.io/docs/guides/acl.html. 
-author: - - Steve Gargan (@sgargan) - - Colin Nolan (@colin-nolan) -options: - mgmt_token: - description: - - a management token is required to manipulate the acl lists - required: true - type: str - state: - description: - - whether the ACL pair should be present or absent - required: false - choices: ['present', 'absent'] - default: present - type: str - token_type: - description: - - the type of token that should be created - choices: ['client', 'management'] - default: client - type: str - name: - description: - - the name that should be associated with the acl key, this is opaque - to Consul - required: false - type: str - token: - description: - - the token key identifying an ACL rule set. If generated by consul - this will be a UUID - required: false - type: str - rules: - type: list - elements: dict - description: - - rules that should be associated with a given token - required: false - host: - description: - - host of the consul agent defaults to localhost - required: false - default: localhost - type: str - port: - type: int - description: - - the port on which the consul agent is running - required: false - default: 8500 - scheme: - description: - - the protocol scheme on which the consul agent is running - required: false - default: http - type: str - validate_certs: - type: bool - description: - - whether to verify the tls certificate of the consul agent - required: false - default: True -requirements: - - python-consul - - pyhcl - - requests -''' - -EXAMPLES = """ -- name: Create an ACL with rules - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - rules: - - key: "foo" - policy: read - - key: "private/foo" - policy: deny - -- name: Create an ACL with a specific token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: my-token - rules: - - key: "foo" - policy: read - -- name: Update the rules associated to an ACL token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: some_client_token - rules: - - event: "bbq" - policy: write - - key: "foo" - policy: read - - key: "private" - policy: deny - - keyring: write - - node: "hgs4" - policy: write - - operator: read - - query: "" - policy: write - - service: "consul" - policy: write - - session: "standup" - policy: write - -- name: Remove a token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e - state: absent -""" - -RETURN = """ -token: - description: the token associated to the ACL (the ACL's ID) - returned: success - type: str - sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da -rules: - description: the HCL JSON representation of the rules associated to the ACL, in the format described in the - Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification). 
- returned: I(status) == "present" - type: str - sample: { - "key": { - "foo": { - "policy": "write" - }, - "bar": { - "policy": "deny" - } - } - } -operation: - description: the operation performed on the ACL - returned: changed - type: str - sample: update -""" - - -try: - import consul - python_consul_installed = True -except ImportError: - python_consul_installed = False - -try: - import hcl - pyhcl_installed = True -except ImportError: - pyhcl_installed = False - -try: - from requests.exceptions import ConnectionError - has_requests = True -except ImportError: - has_requests = False - -from collections import defaultdict -from ansible.module_utils.basic import to_text, AnsibleModule - - -RULE_SCOPES = [ - "agent", - "agent_prefix", - "event", - "event_prefix", - "key", - "key_prefix", - "keyring", - "node", - "node_prefix", - "operator", - "query", - "query_prefix", - "service", - "service_prefix", - "session", - "session_prefix", -] - -MANAGEMENT_PARAMETER_NAME = "mgmt_token" -HOST_PARAMETER_NAME = "host" -SCHEME_PARAMETER_NAME = "scheme" -VALIDATE_CERTS_PARAMETER_NAME = "validate_certs" -NAME_PARAMETER_NAME = "name" -PORT_PARAMETER_NAME = "port" -RULES_PARAMETER_NAME = "rules" -STATE_PARAMETER_NAME = "state" -TOKEN_PARAMETER_NAME = "token" -TOKEN_TYPE_PARAMETER_NAME = "token_type" - -PRESENT_STATE_VALUE = "present" -ABSENT_STATE_VALUE = "absent" - -CLIENT_TOKEN_TYPE_VALUE = "client" -MANAGEMENT_TOKEN_TYPE_VALUE = "management" - -REMOVE_OPERATION = "remove" -UPDATE_OPERATION = "update" -CREATE_OPERATION = "create" - -_POLICY_JSON_PROPERTY = "policy" -_RULES_JSON_PROPERTY = "Rules" -_TOKEN_JSON_PROPERTY = "ID" -_TOKEN_TYPE_JSON_PROPERTY = "Type" -_NAME_JSON_PROPERTY = "Name" -_POLICY_YML_PROPERTY = "policy" -_POLICY_HCL_PROPERTY = "policy" - -_ARGUMENT_SPEC = { - MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True), - HOST_PARAMETER_NAME: dict(default='localhost'), - SCHEME_PARAMETER_NAME: dict(default='http'), - VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True), - NAME_PARAMETER_NAME: dict(), - PORT_PARAMETER_NAME: dict(default=8500, type='int'), - RULES_PARAMETER_NAME: dict(type='list', elements='dict'), - STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), - TOKEN_PARAMETER_NAME: dict(no_log=False), - TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], - default=CLIENT_TOKEN_TYPE_VALUE) -} - - -def set_acl(consul_client, configuration): - """ - Sets an ACL based on the given configuration. 
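For reference, the create/update branches here sit on python-consul's legacy ACL endpoints (pre-1.4 Consul); a minimal sketch, with an illustrative host, management token and rule set:

import consul

c = consul.Consul(host='consul1.example.com', token='some_management_acl')
rules = 'key "foo" {\n policy = "read"\n}\n'   # HCL, as produced by encode_rules_as_hcl_string()
token = c.acl.create(name='Foo access', type='client', rules=rules)  # returns the new token/ID
c.acl.update(token, name='Foo access', rules=rules)                  # re-apply the same rules
c.acl.destroy(token)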
- :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of setting the ACL - """ - acls_as_json = decode_acls_as_json(consul_client.acl.list()) - existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None) - existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json) - if None in existing_acls_mapped_by_token: - raise AssertionError("expecting ACL list to be associated to a token: %s" % - existing_acls_mapped_by_token[None]) - - if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name: - # No token but name given so can get token from name - configuration.token = existing_acls_mapped_by_name[configuration.name].token - - if configuration.token and configuration.token in existing_acls_mapped_by_token: - return update_acl(consul_client, configuration) - else: - if configuration.token in existing_acls_mapped_by_token: - raise AssertionError() - if configuration.name in existing_acls_mapped_by_name: - raise AssertionError() - return create_acl(consul_client, configuration) - - -def update_acl(consul_client, configuration): - """ - Updates an ACL. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the update - """ - existing_acl = load_acl_with_token(consul_client, configuration.token) - changed = existing_acl.rules != configuration.rules - - if changed: - name = configuration.name if configuration.name is not None else existing_acl.name - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) - updated_token = consul_client.acl.update( - configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl) - if updated_token != configuration.token: - raise AssertionError() - - return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION) - - -def create_acl(consul_client, configuration): - """ - Creates an ACL. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the creation - """ - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None - token = consul_client.acl.create( - name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token) - rules = configuration.rules - return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION) - - -def remove_acl(consul, configuration): - """ - Removes an ACL. - :param consul: the consul client - :param configuration: the run configuration - :return: the output of the removal - """ - token = configuration.token - changed = consul.acl.info(token) is not None - if changed: - consul.acl.destroy(token) - return Output(changed=changed, token=token, operation=REMOVE_OPERATION) - - -def load_acl_with_token(consul, token): - """ - Loads the ACL with the given token (token == rule ID). - :param consul: the consul client - :param token: the ACL "token"/ID (not name) - :return: the ACL associated to the given token - :exception ConsulACLTokenNotFoundException: raised if the given token does not exist - """ - acl_as_json = consul.acl.info(token) - if acl_as_json is None: - raise ConsulACLNotFoundException(token) - return decode_acl_as_json(acl_as_json) - - -def encode_rules_as_hcl_string(rules): - """ - Converts the given rules into the equivalent HCL (string) representation. 
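A worked example of the mapping implemented below, assuming this module's Rule model: a rule with a pattern becomes an HCL block, a scope-only rule a bare assignment:

encode_rule_as_hcl_string(Rule('key', 'read', 'foo'))   # -> 'key "foo" {\n policy = "read"\n}\n'
encode_rule_as_hcl_string(Rule('operator', 'read'))     # -> 'operator = "read"\n'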
- :param rules: the rules - :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal - note for justification) - """ - if len(rules) == 0: - # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty - # string if there is no rules... - return None - rules_as_hcl = "" - for rule in rules: - rules_as_hcl += encode_rule_as_hcl_string(rule) - return rules_as_hcl - - -def encode_rule_as_hcl_string(rule): - """ - Converts the given rule into the equivalent HCL (string) representation. - :param rule: the rule - :return: the equivalent HCL (string) representation of the rule - """ - if rule.pattern is not None: - return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy) - else: - return '%s = "%s"\n' % (rule.scope, rule.policy) - - -def decode_rules_as_hcl_string(rules_as_hcl): - """ - Converts the given HCL (string) representation of rules into a list of rule domain models. - :param rules_as_hcl: the HCL (string) representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules_as_hcl = to_text(rules_as_hcl) - rules_as_json = hcl.loads(rules_as_hcl) - return decode_rules_as_json(rules_as_json) - - -def decode_rules_as_json(rules_as_json): - """ - Converts the given JSON representation of rules into a list of rule domain models. - :param rules_as_json: the JSON representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - for scope in rules_as_json: - if not isinstance(rules_as_json[scope], dict): - rules.add(Rule(scope, rules_as_json[scope])) - else: - for pattern, policy in rules_as_json[scope].items(): - rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern)) - return rules - - -def encode_rules_as_json(rules): - """ - Converts the given rules into the equivalent JSON representation according to the documentation: - https://www.consul.io/docs/guides/acl.html#rule-specification. - :param rules: the rules - :return: JSON representation of the given rules - """ - rules_as_json = defaultdict(dict) - for rule in rules: - if rule.pattern is not None: - if rule.pattern in rules_as_json[rule.scope]: - raise AssertionError() - rules_as_json[rule.scope][rule.pattern] = { - _POLICY_JSON_PROPERTY: rule.policy - } - else: - if rule.scope in rules_as_json: - raise AssertionError() - rules_as_json[rule.scope] = rule.policy - return rules_as_json - - -def decode_rules_as_yml(rules_as_yml): - """ - Converts the given YAML representation of rules into a list of rule domain models. - :param rules_as_yml: the YAML representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - if rules_as_yml: - for rule_as_yml in rules_as_yml: - rule_added = False - for scope in RULE_SCOPES: - if scope in rule_as_yml: - if rule_as_yml[scope] is None: - raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope) - policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \ - else rule_as_yml[scope] - pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None - rules.add(Rule(scope, policy, pattern)) - rule_added = True - break - if not rule_added: - raise ValueError("A rule requires one of %s and a policy." 
% ('/'.join(RULE_SCOPES))) - return rules - - -def decode_acl_as_json(acl_as_json): - """ - Converts the given JSON representation of an ACL into the equivalent domain model. - :param acl_as_json: the JSON representation of an ACL - :return: the equivalent domain model to the given ACL - """ - rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY] - rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \ - else RuleCollection() - return ACL( - rules=rules, - token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY], - token=acl_as_json[_TOKEN_JSON_PROPERTY], - name=acl_as_json[_NAME_JSON_PROPERTY] - ) - - -def decode_acls_as_json(acls_as_json): - """ - Converts the given JSON representation of ACLs into a list of ACL domain models. - :param acls_as_json: the JSON representation of a collection of ACLs - :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same) - """ - return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json] - - -class ConsulACLNotFoundException(Exception): - """ - Exception raised if an ACL with is not found. - """ - - -class Configuration: - """ - Configuration for this module. - """ - - def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None, - rules=None, state=None, token=None, token_type=None): - self.management_token = management_token # type: str - self.host = host # type: str - self.scheme = scheme # type: str - self.validate_certs = validate_certs # type: bool - self.name = name # type: str - self.port = port # type: int - self.rules = rules # type: RuleCollection - self.state = state # type: str - self.token = token # type: str - self.token_type = token_type # type: str - - -class Output: - """ - Output of an action of this module. - """ - - def __init__(self, changed=None, token=None, rules=None, operation=None): - self.changed = changed # type: bool - self.token = token # type: str - self.rules = rules # type: RuleCollection - self.operation = operation # type: str - - -class ACL: - """ - Consul ACL. See: https://www.consul.io/docs/guides/acl.html. - """ - - def __init__(self, rules, token_type, token, name): - self.rules = rules - self.token_type = token_type - self.token = token - self.name = name - - def __eq__(self, other): - return other \ - and isinstance(other, self.__class__) \ - and self.rules == other.rules \ - and self.token_type == other.token_type \ - and self.token == other.token \ - and self.name == other.name - - def __hash__(self): - return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name) - - -class Rule: - """ - ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope. - """ - - def __init__(self, scope, policy, pattern=None): - self.scope = scope - self.policy = policy - self.pattern = pattern - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and self.scope == other.scope \ - and self.policy == other.policy \ - and self.pattern == other.pattern - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern) - - def __str__(self): - return encode_rule_as_hcl_string(self) - - -class RuleCollection: - """ - Collection of ACL rules, which are part of a Consul ACL. 
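As a worked example of the YAML decoding above, using this module's own helpers:

rules = decode_rules_as_yml([
    {'key': 'foo', 'policy': 'read'},   # scope 'key' with pattern 'foo'
    {'operator': 'read'},               # scope-only rule; policy taken from the scope's value
])
print(encode_rules_as_hcl_string(rules))
# key "foo" {
#  policy = "read"
# }
# operator = "read"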
- """ - - def __init__(self): - self._rules = {} - for scope in RULE_SCOPES: - self._rules[scope] = {} - - def __iter__(self): - all_rules = [] - for scope, pattern_keyed_rules in self._rules.items(): - for pattern, rule in pattern_keyed_rules.items(): - all_rules.append(rule) - return iter(all_rules) - - def __len__(self): - count = 0 - for scope in RULE_SCOPES: - count += len(self._rules[scope]) - return count - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and set(self) == set(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return encode_rules_as_hcl_string(self) - - def add(self, rule): - """ - Adds the given rule to this collection. - :param rule: model of a rule - :raises ValueError: raised if there already exists a rule for a given scope and pattern - """ - if rule.pattern in self._rules[rule.scope]: - patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else "" - raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info)) - self._rules[rule.scope][rule.pattern] = rule - - -def get_consul_client(configuration): - """ - Gets a Consul client for the given configuration. - - Does not check if the Consul client can connect. - :param configuration: the run configuration - :return: Consul client - """ - token = configuration.management_token - if token is None: - token = configuration.token - if token is None: - raise AssertionError("Expecting the management token to always be set") - return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme, - verify=configuration.validate_certs, token=token) - - -def check_dependencies(): - """ - Checks that the required dependencies have been imported. - :exception ImportError: if it is detected that any of the required dependencies have not been imported - """ - if not python_consul_installed: - raise ImportError("python-consul required for this module. " - "See: https://python-consul.readthedocs.io/en/latest/#installation") - - if not pyhcl_installed: - raise ImportError("pyhcl required for this module. " - "See: https://pypi.org/project/pyhcl/") - - if not has_requests: - raise ImportError("requests required for this module. See https://pypi.org/project/requests/") - - -def main(): - """ - Main method. 
- """ - module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False) - - try: - check_dependencies() - except ImportError as e: - module.fail_json(msg=str(e)) - - configuration = Configuration( - management_token=module.params.get(MANAGEMENT_PARAMETER_NAME), - host=module.params.get(HOST_PARAMETER_NAME), - scheme=module.params.get(SCHEME_PARAMETER_NAME), - validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME), - name=module.params.get(NAME_PARAMETER_NAME), - port=module.params.get(PORT_PARAMETER_NAME), - rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)), - state=module.params.get(STATE_PARAMETER_NAME), - token=module.params.get(TOKEN_PARAMETER_NAME), - token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME) - ) - consul_client = get_consul_client(configuration) - - try: - if configuration.state == PRESENT_STATE_VALUE: - output = set_acl(consul_client, configuration) - else: - output = remove_acl(consul_client, configuration) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - configuration.host, configuration.port, str(e))) - raise - - return_values = dict(changed=output.changed, token=output.token, operation=output.operation) - if output.rules is not None: - return_values["rules"] = encode_rules_as_json(output.rules) - module.exit_json(**return_values) - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py b/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py deleted file mode 100644 index f7b33b85..00000000 --- a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py +++ /dev/null @@ -1,328 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2015, Steve Gargan -# (c) 2018 Genome Research Ltd. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: consul_kv -short_description: Manipulate entries in the key/value store of a consul cluster -description: - - Allows the retrieval, addition, modification and deletion of key/value entries in a - consul cluster via the agent. The entire contents of the record, including - the indices, flags and session are returned as C(value). - - If the C(key) represents a prefix then note that when a value is removed, the existing - value if any is returned as part of the results. - - See http://www.consul.io/docs/agent/http.html#kv for more details. -requirements: - - python-consul - - requests -author: - - Steve Gargan (@sgargan) - - Colin Nolan (@colin-nolan) -options: - state: - description: - - The action to take with the supplied key and value. If the state is 'present' and `value` is set, the key - contents will be set to the value supplied and `changed` will be set to `true` only if the value was - different to the current contents. If the state is 'present' and `value` is not set, the existing value - associated to the key will be returned. The state 'absent' will remove the key/value pair, - again 'changed' will be set to true only if the key actually existed - prior to the removal. An attempt can be made to obtain or free the - lock associated with a key/value pair with the states 'acquire' or - 'release' respectively. 
a valid session must be supplied to make the - attempt changed will be true if the attempt is successful, false - otherwise. - type: str - choices: [ absent, acquire, present, release ] - default: present - key: - description: - - The key at which the value should be stored. - type: str - required: yes - value: - description: - - The value should be associated with the given key, required if C(state) - is C(present). - type: str - recurse: - description: - - If the key represents a prefix, each entry with the prefix can be - retrieved by setting this to C(yes). - type: bool - retrieve: - description: - - If the I(state) is C(present) and I(value) is set, perform a - read after setting the value and return this value. - default: True - type: bool - session: - description: - - The session that should be used to acquire or release a lock - associated with a key/value pair. - type: str - token: - description: - - The token key identifying an ACL rule set that controls access to - the key value pair - type: str - cas: - description: - - Used when acquiring a lock with a session. If the C(cas) is C(0), then - Consul will only put the key if it does not already exist. If the - C(cas) value is non-zero, then the key is only set if the index matches - the ModifyIndex of that key. - type: str - flags: - description: - - Opaque positive integer value that can be passed when setting a value. - type: str - host: - description: - - Host of the consul agent. - type: str - default: localhost - port: - description: - - The port on which the consul agent is running. - type: int - default: 8500 - scheme: - description: - - The protocol scheme on which the consul agent is running. - type: str - default: http - validate_certs: - description: - - Whether to verify the tls certificate of the consul agent. 
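For reference, the get/set/delete paths of this module reduce to python-consul's KV API; a minimal sketch, assuming a local agent and an illustrative key:

import consul

c = consul.Consul()
index, data = c.kv.get('somekey')        # data is None when the key does not exist
c.kv.put('somekey', 'somevalue')         # returns True on success
c.kv.delete('somekey', recurse=False)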
- type: bool - default: 'yes' -''' - - -EXAMPLES = ''' -# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None` -# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None` -- name: Retrieve a value from the key/value store - community.general.consul_kv: - key: somekey - register: retrieved_key - -- name: Add or update the value associated with a key in the key/value store - community.general.consul_kv: - key: somekey - value: somevalue - -- name: Remove a key from the store - community.general.consul_kv: - key: somekey - state: absent - -- name: Add a node to an arbitrary group via consul inventory (see consul.ini) - community.general.consul_kv: - key: ansible/groups/dc1/somenode - value: top_secret - -- name: Register a key/value pair with an associated session - community.general.consul_kv: - key: stg/node/server_birthday - value: 20160509 - session: "{{ sessionid }}" - state: acquire -''' - -from ansible.module_utils.common.text.converters import to_text - -try: - import consul - from requests.exceptions import ConnectionError - python_consul_installed = True -except ImportError: - python_consul_installed = False - -from ansible.module_utils.basic import AnsibleModule - -# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a -# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call, -# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key) -NOT_SET = None - - -def _has_value_changed(consul_client, key, target_value): - """ - Uses the given Consul client to determine if the value associated to the given key is different to the given target - value. - :param consul_client: Consul connected client - :param key: key in Consul - :param target_value: value to be associated to the key - :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the - value has changed (i.e. 
the stored value is not the target value) - """ - index, existing = consul_client.kv.get(key) - if not existing: - return index, True - try: - changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value - return index, changed - except UnicodeError: - # Existing value was not decodable but all values we set are valid utf-8 - return index, True - - -def execute(module): - state = module.params.get('state') - - if state == 'acquire' or state == 'release': - lock(module, state) - elif state == 'present': - if module.params.get('value') is NOT_SET: - get_value(module) - else: - set_value(module) - elif state == 'absent': - remove_value(module) - else: - module.exit_json(msg="Unsupported state: %s" % (state, )) - - -def lock(module, state): - - consul_api = get_consul_api(module) - - session = module.params.get('session') - key = module.params.get('key') - value = module.params.get('value') - - if not session: - module.fail( - msg='%s of lock for %s requested but no session supplied' % - (state, key)) - - index, changed = _has_value_changed(consul_api, key, value) - - if changed and not module.check_mode: - if state == 'acquire': - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - acquire=session, - flags=module.params.get('flags')) - else: - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - release=session, - flags=module.params.get('flags')) - - module.exit_json(changed=changed, - index=index, - key=key) - - -def get_value(module): - consul_api = get_consul_api(module) - key = module.params.get('key') - - index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse')) - - module.exit_json(changed=False, index=index, data=existing_value) - - -def set_value(module): - consul_api = get_consul_api(module) - - key = module.params.get('key') - value = module.params.get('value') - - if value is NOT_SET: - raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key) - - index, changed = _has_value_changed(consul_api, key, value) - - if changed and not module.check_mode: - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - flags=module.params.get('flags')) - - stored = None - if module.params.get('retrieve'): - index, stored = consul_api.kv.get(key) - - module.exit_json(changed=changed, - index=index, - key=key, - data=stored) - - -def remove_value(module): - ''' remove the value associated with the given key. if the recurse parameter - is set then any key prefixed with the given key will be removed. ''' - consul_api = get_consul_api(module) - - key = module.params.get('key') - - index, existing = consul_api.kv.get( - key, recurse=module.params.get('recurse')) - - changed = existing is not None - if changed and not module.check_mode: - consul_api.kv.delete(key, module.params.get('recurse')) - - module.exit_json(changed=changed, - index=index, - key=key, - data=existing) - - -def get_consul_api(module, token=None): - return consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs'), - token=module.params.get('token')) - - -def test_dependencies(module): - if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. 
" - "see https://python-consul.readthedocs.io/en/latest/#installation") - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - cas=dict(type='str'), - flags=dict(type='str'), - key=dict(type='str', required=True, no_log=False), - host=dict(type='str', default='localhost'), - scheme=dict(type='str', default='http'), - validate_certs=dict(type='bool', default=True), - port=dict(type='int', default=8500), - recurse=dict(type='bool'), - retrieve=dict(type='bool', default=True), - state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']), - token=dict(type='str', no_log=True), - value=dict(type='str', default=NOT_SET), - session=dict(type='str'), - ), - supports_check_mode=True - ) - - test_dependencies(module) - - try: - execute(module) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), e)) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py b/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py deleted file mode 100644 index 7ace1f89..00000000 --- a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: consul_session -short_description: Manipulate consul sessions -description: - - Allows the addition, modification and deletion of sessions in a consul - cluster. These sessions can then be used in conjunction with key value pairs - to implement distributed locks. In depth documentation for working with - sessions can be found at http://www.consul.io/docs/internals/sessions.html -requirements: - - python-consul - - requests -author: -- Steve Gargan (@sgargan) -options: - id: - description: - - ID of the session, required when I(state) is either C(info) or - C(remove). - type: str - state: - description: - - Whether the session should be present i.e. created if it doesn't - exist, or absent, removed if present. If created, the I(id) for the - session is returned in the output. If C(absent), I(id) is - required to remove the session. Info for a single session, all the - sessions for a node or all available sessions can be retrieved by - specifying C(info), C(node) or C(list) for the I(state); for C(node) - or C(info), the node I(name) or session I(id) is required as parameter. - choices: [ absent, info, list, node, present ] - type: str - default: present - name: - description: - - The name that should be associated with the session. Required when - I(state=node) is used. - type: str - delay: - description: - - The optional lock delay that can be attached to the session when it - is created. Locks for invalidated sessions ar blocked from being - acquired until this delay has expired. Durations are in seconds. - type: int - default: 15 - node: - description: - - The name of the node that with which the session will be associated. - by default this is the name of the agent. 
- type: str - datacenter: - description: - - The name of the datacenter in which the session exists or should be - created. - type: str - checks: - description: - - Checks that will be used to verify the session health. If - all the checks fail, the session will be invalidated and any locks - associated with the session will be release and can be acquired once - the associated lock delay has expired. - type: list - elements: str - host: - description: - - The host of the consul agent defaults to localhost. - type: str - default: localhost - port: - description: - - The port on which the consul agent is running. - type: int - default: 8500 - scheme: - description: - - The protocol scheme on which the consul agent is running. - type: str - default: http - validate_certs: - description: - - Whether to verify the TLS certificate of the consul agent. - type: bool - default: True - behavior: - description: - - The optional behavior that can be attached to the session when it - is created. This controls the behavior when a session is invalidated. - choices: [ delete, release ] - type: str - default: release -''' - -EXAMPLES = ''' -- name: Register basic session with consul - community.general.consul_session: - name: session1 - -- name: Register a session with an existing check - community.general.consul_session: - name: session_with_check - checks: - - existing_check_name - -- name: Register a session with lock_delay - community.general.consul_session: - name: session_with_delay - delay: 20s - -- name: Retrieve info about session by id - community.general.consul_session: - id: session_id - state: info - -- name: Retrieve active sessions - community.general.consul_session: - state: list -''' - -try: - import consul - from requests.exceptions import ConnectionError - python_consul_installed = True -except ImportError: - python_consul_installed = False - -from ansible.module_utils.basic import AnsibleModule - - -def execute(module): - - state = module.params.get('state') - - if state in ['info', 'list', 'node']: - lookup_sessions(module) - elif state == 'present': - update_session(module) - else: - remove_session(module) - - -def lookup_sessions(module): - - datacenter = module.params.get('datacenter') - - state = module.params.get('state') - consul_client = get_consul_api(module) - try: - if state == 'list': - sessions_list = consul_client.session.list(dc=datacenter) - # Ditch the index, this can be grabbed from the results - if sessions_list and len(sessions_list) >= 2: - sessions_list = sessions_list[1] - module.exit_json(changed=True, - sessions=sessions_list) - elif state == 'node': - node = module.params.get('node') - sessions = consul_client.session.node(node, dc=datacenter) - module.exit_json(changed=True, - node=node, - sessions=sessions) - elif state == 'info': - session_id = module.params.get('id') - - session_by_id = consul_client.session.info(session_id, dc=datacenter) - module.exit_json(changed=True, - session_id=session_id, - sessions=session_by_id) - - except Exception as e: - module.fail_json(msg="Could not retrieve session info %s" % e) - - -def update_session(module): - - name = module.params.get('name') - delay = module.params.get('delay') - checks = module.params.get('checks') - datacenter = module.params.get('datacenter') - node = module.params.get('node') - behavior = module.params.get('behavior') - - consul_client = get_consul_api(module) - - try: - session = consul_client.session.create( - name=name, - behavior=behavior, - node=node, - lock_delay=delay, - dc=datacenter, - 
checks=checks - ) - module.exit_json(changed=True, - session_id=session, - name=name, - behavior=behavior, - delay=delay, - checks=checks, - node=node) - except Exception as e: - module.fail_json(msg="Could not create/update session %s" % e) - - -def remove_session(module): - session_id = module.params.get('id') - - consul_client = get_consul_api(module) - - try: - consul_client.session.destroy(session_id) - - module.exit_json(changed=True, - session_id=session_id) - except Exception as e: - module.fail_json(msg="Could not remove session with id '%s' %s" % ( - session_id, e)) - - -def get_consul_api(module): - return consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs')) - - -def test_dependencies(module): - if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. " - "see https://python-consul.readthedocs.io/en/latest/#installation") - - -def main(): - argument_spec = dict( - checks=dict(type='list', elements='str'), - delay=dict(type='int', default='15'), - behavior=dict(type='str', default='release', choices=['release', 'delete']), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=8500), - scheme=dict(type='str', default='http'), - validate_certs=dict(type='bool', default=True), - id=dict(type='str'), - name=dict(type='str'), - node=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']), - datacenter=dict(type='str'), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_if=[ - ('state', 'node', ['name']), - ('state', 'info', ['id']), - ('state', 'remove', ['id']), - ], - supports_check_mode=False - ) - - test_dependencies(module) - - try: - execute(module) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), e)) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/clustering/etcd3.py b/ansible_collections/community/general/plugins/modules/clustering/etcd3.py deleted file mode 100644 index 6a095133..00000000 --- a/ansible_collections/community/general/plugins/modules/clustering/etcd3.py +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Jean-Philippe Evrard -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: etcd3 -short_description: "Set or delete key value pairs from an etcd3 cluster" -requirements: - - etcd3 -description: - - Sets or deletes values in etcd3 cluster using its v3 api. - - Needs python etcd3 lib to work -options: - key: - type: str - description: - - the key where the information is stored in the cluster - required: true - value: - type: str - description: - - the information stored - required: true - host: - type: str - description: - - the IP address of the cluster - default: 'localhost' - port: - type: int - description: - - the port number used to connect to the cluster - default: 2379 - state: - type: str - description: - - the state of the value for the key. 
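For reference, the etcd3 module body that follows wraps the python etcd3 client; a minimal sketch of the same put/get/delete cycle, assuming a cluster on localhost:2379:

import etcd3

etcd = etcd3.client(host='localhost', port=2379)
etcd.put('foo', 'bar')
value, metadata = etcd.get('foo')        # value is bytes, or None if the key is absent
etcd.delete('foo')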
- - can be present or absent - required: true - choices: [ present, absent ] - user: - type: str - description: - - The etcd user to authenticate with. - password: - type: str - description: - - The password to use for authentication. - - Required if I(user) is defined. - ca_cert: - type: path - description: - - The Certificate Authority to use to verify the etcd host. - - Required if I(client_cert) and I(client_key) are defined. - client_cert: - type: path - description: - - PEM formatted certificate chain file to be used for SSL client authentication. - - Required if I(client_key) is defined. - client_key: - type: path - description: - - PEM formatted file that contains your private key to be used for SSL client authentication. - - Required if I(client_cert) is defined. - timeout: - type: int - description: - - The socket level timeout in seconds. -author: - - Jean-Philippe Evrard (@evrardjp) - - Victor Fauth (@vfauth) -''' - -EXAMPLES = """ -- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379" - community.general.etcd3: - key: "foo" - value: "baz3" - host: "localhost" - port: 2379 - state: "present" - -- name: Authenticate using user/password combination with a timeout of 10 seconds - community.general.etcd3: - key: "foo" - value: "baz3" - state: "present" - user: "someone" - password: "password123" - timeout: 10 - -- name: Authenticate using TLS certificates - community.general.etcd3: - key: "foo" - value: "baz3" - state: "present" - ca_cert: "/etc/ssl/certs/CA_CERT.pem" - client_cert: "/etc/ssl/certs/cert.crt" - client_key: "/etc/ssl/private/key.pem" -""" - -RETURN = ''' -key: - description: The key that was queried - returned: always - type: str -old_value: - description: The previous value in the cluster - returned: always - type: str -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -try: - import etcd3 - HAS_ETCD = True -except ImportError: - ETCD_IMP_ERR = traceback.format_exc() - HAS_ETCD = False - - -def run_module(): - # define the available arguments/parameters that a user can pass to - # the module - module_args = dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='str', required=True), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=2379), - state=dict(type='str', required=True, choices=['present', 'absent']), - user=dict(type='str'), - password=dict(type='str', no_log=True), - ca_cert=dict(type='path'), - client_cert=dict(type='path'), - client_key=dict(type='path'), - timeout=dict(type='int'), - ) - - # seed the result dict in the object - # we primarily care about changed and state - # change is if this module effectively modified the target - # state will include any data that you want your module to pass back - # for consumption, for example, in a subsequent task - result = dict( - changed=False, - ) - - # the AnsibleModule object will be our abstraction working with Ansible - # this includes instantiation, a couple of common attr would be the - # args/params passed to the execution, as well as if the module - # supports check mode - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - required_together=[['client_cert', 'client_key'], ['user', 'password']], - ) - - # It is possible to set `ca_cert` to verify the server identity without - # setting `client_cert` or `client_key` to authenticate the client - # so 
required_together is enough - # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence - # of either `client_cert` or `client_key` is enough - if module.params['ca_cert'] is None and module.params['client_cert'] is not None: - module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.") - - result['key'] = module.params.get('key') - module.params['cert_cert'] = module.params.pop('client_cert') - module.params['cert_key'] = module.params.pop('client_key') - - if not HAS_ETCD: - module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR) - - allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', - 'timeout', 'user', 'password'] - # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is - # the minimum supported version - # client_params = {key: value for key, value in module.params.items() if key in allowed_keys} - client_params = dict() - for key, value in module.params.items(): - if key in allowed_keys: - client_params[key] = value - try: - etcd = etcd3.client(**client_params) - except Exception as exp: - module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)), - exception=traceback.format_exc()) - try: - cluster_value = etcd.get(module.params['key']) - except Exception as exp: - module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)), - exception=traceback.format_exc()) - - # Make the cluster_value[0] a string for string comparisons - result['old_value'] = to_native(cluster_value[0]) - - if module.params['state'] == 'absent': - if cluster_value[0] is not None: - if module.check_mode: - result['changed'] = True - else: - try: - etcd.delete(module.params['key']) - except Exception as exp: - module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)), - exception=traceback.format_exc()) - else: - result['changed'] = True - elif module.params['state'] == 'present': - if result['old_value'] != module.params['value']: - if module.check_mode: - result['changed'] = True - else: - try: - etcd.put(module.params['key'], module.params['value']) - except Exception as exp: - module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)), - exception=traceback.format_exc()) - else: - result['changed'] = True - else: - module.fail_json(msg="State not recognized") - - # manipulate or modify the state as needed (this is going to be the - # part where your module will do what it needs to do) - - # during the execution of the module, if there is an exception or a - # conditional state that effectively causes a failure, run - # AnsibleModule.fail_json() to pass in the message and the result - - # in the event of a successful module execution, you will want to - # simple AnsibleModule.exit_json(), passing the key/value results - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py b/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py deleted file mode 100644 index 341592be..00000000 --- a/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2020, FERREIRA Christophe -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, 
print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: nomad_job -author: FERREIRA Christophe (@chris93111) -version_added: "1.3.0" -short_description: Launch a Nomad Job -description: - - Launch a Nomad job. - - Stop a Nomad job. - - Force start a Nomad job -requirements: - - python-nomad -extends_documentation_fragment: - - community.general.nomad -options: - name: - description: - - Name of job for delete, stop and start job without source. - - Name of job for delete, stop and start job without source. - - Either this or I(content) must be specified. - type: str - state: - description: - - Deploy or remove job. - choices: ["present", "absent"] - required: true - type: str - force_start: - description: - - Force job to started. - type: bool - default: false - content: - description: - - Content of Nomad job. - - Either this or I(name) must be specified. - type: str - content_format: - description: - - Type of content of Nomad job. - choices: ["hcl", "json"] - default: hcl - type: str -notes: - - C(check_mode) is supported. -seealso: - - name: Nomad jobs documentation - description: Complete documentation for Nomad API jobs. - link: https://www.nomadproject.io/api-docs/jobs/ -''' - -EXAMPLES = ''' -- name: Create job - community.general.nomad_job: - host: localhost - state: present - content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}" - timeout: 120 - -- name: Stop job - community.general.nomad_job: - host: localhost - state: absent - name: api - -- name: Force job to start - community.general.nomad_job: - host: localhost - state: present - name: api - timeout: 120 - force_start: true -''' - -import json - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -import_nomad = None -try: - import nomad - import_nomad = True -except ImportError: - import_nomad = False - - -def run(): - module = AnsibleModule( - argument_spec=dict( - host=dict(required=True, type='str'), - state=dict(required=True, choices=['present', 'absent']), - use_ssl=dict(type='bool', default=True), - timeout=dict(type='int', default=5), - validate_certs=dict(type='bool', default=True), - client_cert=dict(type='path', default=None), - client_key=dict(type='path', default=None), - namespace=dict(type='str', default=None), - name=dict(type='str', default=None), - content_format=dict(choices=['hcl', 'json'], default='hcl'), - content=dict(type='str', default=None), - force_start=dict(type='bool', default=False), - token=dict(type='str', default=None, no_log=True) - ), - supports_check_mode=True, - mutually_exclusive=[ - ["name", "content"] - ], - required_one_of=[ - ['name', 'content'] - ] - ) - - if not import_nomad: - module.fail_json(msg=missing_required_lib("python-nomad")) - - certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) - - nomad_client = nomad.Nomad( - host=module.params.get('host'), - secure=module.params.get('use_ssl'), - timeout=module.params.get('timeout'), - verify=module.params.get('validate_certs'), - cert=certificate_ssl, - namespace=module.params.get('namespace'), - token=module.params.get('token') - ) - - if module.params.get('state') == "present": - - if module.params.get('name') and not module.params.get('force_start'): - module.fail_json(msg='For start job with name, force_start is needed') - - changed = False - if module.params.get('content'): - - if module.params.get('content_format') == 'json': - - job_json = module.params.get('content') - try: 
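-                    # json.loads() turns the raw job definition into a dict;
-                    # a ValueError from malformed JSON is reported through
-                    # module.fail_json() below instead of raising a traceback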
- job_json = json.loads(job_json) - except ValueError as e: - module.fail_json(msg=to_native(e)) - job = dict() - job['job'] = job_json - try: - job_id = job_json.get('ID') - if job_id is None: - module.fail_json(msg="Cannot retrieve job with ID None") - plan = nomad_client.job.plan_job(job_id, job, diff=True) - if not plan['Diff'].get('Type') == "None": - changed = True - if not module.check_mode: - result = nomad_client.jobs.register_job(job) - else: - result = plan - else: - result = plan - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('content_format') == 'hcl': - - try: - job_hcl = module.params.get('content') - job_json = nomad_client.jobs.parse(job_hcl) - job = dict() - job['job'] = job_json - except nomad.api.exceptions.BadRequestNomadException as err: - msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text) - module.fail_json(msg=to_native(msg)) - try: - job_id = job_json.get('ID') - plan = nomad_client.job.plan_job(job_id, job, diff=True) - if not plan['Diff'].get('Type') == "None": - changed = True - if not module.check_mode: - result = nomad_client.jobs.register_job(job) - else: - result = plan - else: - result = plan - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('force_start'): - - try: - job = dict() - if module.params.get('name'): - job_name = module.params.get('name') - else: - job_name = job_json['Name'] - job_json = nomad_client.job.get_job(job_name) - if job_json['Status'] == 'running': - result = job_json - else: - job_json['Status'] = 'running' - job_json['Stop'] = False - job['job'] = job_json - if not module.check_mode: - result = nomad_client.jobs.register_job(job) - else: - result = nomad_client.validate.validate_job(job) - if not result.status_code == 200: - module.fail_json(msg=to_native(result.text)) - result = json.loads(result.text) - changed = True - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('state') == "absent": - - try: - if not module.params.get('name') is None: - job_name = module.params.get('name') - else: - if module.params.get('content_format') == 'hcl': - job_json = nomad_client.jobs.parse(module.params.get('content')) - job_name = job_json['Name'] - if module.params.get('content_format') == 'json': - job_json = module.params.get('content') - job_name = job_json['Name'] - job = nomad_client.job.get_job(job_name) - if job['Status'] == 'dead': - changed = False - result = job - else: - if not module.check_mode: - result = nomad_client.job.deregister_job(job_name) - else: - result = job - changed = True - except Exception as e: - module.fail_json(msg=to_native(e)) - - module.exit_json(changed=changed, result=result) - - -def main(): - - run() - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py b/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py deleted file mode 100644 index d49111bb..00000000 --- a/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2020, FERREIRA Christophe -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: nomad_job_info -author: FERREIRA Christophe (@chris93111) -version_added: "1.3.0" 
-short_description: Get Nomad Jobs info -description: - - Get info for one Nomad job. - - List Nomad jobs. -requirements: - - python-nomad -extends_documentation_fragment: - - community.general.nomad -options: - name: - description: - - Name of job for Get info. - - If not specified, lists all jobs. - type: str -notes: - - C(check_mode) is supported. -seealso: - - name: Nomad jobs documentation - description: Complete documentation for Nomad API jobs. - link: https://www.nomadproject.io/api-docs/jobs/ -''' - -EXAMPLES = ''' -- name: Get info for job awx - community.general.nomad_job_info: - host: localhost - name: awx - register: result - -- name: List Nomad jobs - community.general.nomad_job_info: - host: localhost - register: result - -''' - -RETURN = ''' -result: - description: List with dictionary contains jobs info - returned: success - type: list - sample: [ - { - "Affinities": null, - "AllAtOnce": false, - "Constraints": null, - "ConsulToken": "", - "CreateIndex": 13, - "Datacenters": [ - "dc1" - ], - "Dispatched": false, - "ID": "example", - "JobModifyIndex": 13, - "Meta": null, - "ModifyIndex": 13, - "Multiregion": null, - "Name": "example", - "Namespace": "default", - "NomadTokenID": "", - "ParameterizedJob": null, - "ParentID": "", - "Payload": null, - "Periodic": null, - "Priority": 50, - "Region": "global", - "Spreads": null, - "Stable": false, - "Status": "pending", - "StatusDescription": "", - "Stop": false, - "SubmitTime": 1602244370615307000, - "TaskGroups": [ - { - "Affinities": null, - "Constraints": null, - "Count": 1, - "EphemeralDisk": { - "Migrate": false, - "SizeMB": 300, - "Sticky": false - }, - "Meta": null, - "Migrate": { - "HealthCheck": "checks", - "HealthyDeadline": 300000000000, - "MaxParallel": 1, - "MinHealthyTime": 10000000000 - }, - "Name": "cache", - "Networks": null, - "ReschedulePolicy": { - "Attempts": 0, - "Delay": 30000000000, - "DelayFunction": "exponential", - "Interval": 0, - "MaxDelay": 3600000000000, - "Unlimited": true - }, - "RestartPolicy": { - "Attempts": 3, - "Delay": 15000000000, - "Interval": 1800000000000, - "Mode": "fail" - }, - "Scaling": null, - "Services": null, - "ShutdownDelay": null, - "Spreads": null, - "StopAfterClientDisconnect": null, - "Tasks": [ - { - "Affinities": null, - "Artifacts": null, - "CSIPluginConfig": null, - "Config": { - "image": "redis:3.2", - "port_map": [ - { - "db": 6379.0 - } - ] - }, - "Constraints": null, - "DispatchPayload": null, - "Driver": "docker", - "Env": null, - "KillSignal": "", - "KillTimeout": 5000000000, - "Kind": "", - "Leader": false, - "Lifecycle": null, - "LogConfig": { - "MaxFileSizeMB": 10, - "MaxFiles": 10 - }, - "Meta": null, - "Name": "redis", - "Resources": { - "CPU": 500, - "Devices": null, - "DiskMB": 0, - "IOPS": 0, - "MemoryMB": 256, - "Networks": [ - { - "CIDR": "", - "DNS": null, - "Device": "", - "DynamicPorts": [ - { - "HostNetwork": "default", - "Label": "db", - "To": 0, - "Value": 0 - } - ], - "IP": "", - "MBits": 10, - "Mode": "", - "ReservedPorts": null - } - ] - }, - "RestartPolicy": { - "Attempts": 3, - "Delay": 15000000000, - "Interval": 1800000000000, - "Mode": "fail" - }, - "Services": [ - { - "AddressMode": "auto", - "CanaryMeta": null, - "CanaryTags": null, - "Checks": [ - { - "AddressMode": "", - "Args": null, - "CheckRestart": null, - "Command": "", - "Expose": false, - "FailuresBeforeCritical": 0, - "GRPCService": "", - "GRPCUseTLS": false, - "Header": null, - "InitialStatus": "", - "Interval": 10000000000, - "Method": "", - "Name": "alive", - "Path": "", - 
"PortLabel": "", - "Protocol": "", - "SuccessBeforePassing": 0, - "TLSSkipVerify": false, - "TaskName": "", - "Timeout": 2000000000, - "Type": "tcp" - } - ], - "Connect": null, - "EnableTagOverride": false, - "Meta": null, - "Name": "redis-cache", - "PortLabel": "db", - "Tags": [ - "global", - "cache" - ], - "TaskName": "" - } - ], - "ShutdownDelay": 0, - "Templates": null, - "User": "", - "Vault": null, - "VolumeMounts": null - } - ], - "Update": { - "AutoPromote": false, - "AutoRevert": false, - "Canary": 0, - "HealthCheck": "checks", - "HealthyDeadline": 180000000000, - "MaxParallel": 1, - "MinHealthyTime": 10000000000, - "ProgressDeadline": 600000000000, - "Stagger": 30000000000 - }, - "Volumes": null - } - ], - "Type": "service", - "Update": { - "AutoPromote": false, - "AutoRevert": false, - "Canary": 0, - "HealthCheck": "", - "HealthyDeadline": 0, - "MaxParallel": 1, - "MinHealthyTime": 0, - "ProgressDeadline": 0, - "Stagger": 30000000000 - }, - "VaultNamespace": "", - "VaultToken": "", - "Version": 0 - } - ] - -''' - - -import os -import json - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -import_nomad = None -try: - import nomad - import_nomad = True -except ImportError: - import_nomad = False - - -def run(): - module = AnsibleModule( - argument_spec=dict( - host=dict(required=True, type='str'), - use_ssl=dict(type='bool', default=True), - timeout=dict(type='int', default=5), - validate_certs=dict(type='bool', default=True), - client_cert=dict(type='path', default=None), - client_key=dict(type='path', default=None), - namespace=dict(type='str', default=None), - name=dict(type='str', default=None), - token=dict(type='str', default=None, no_log=True) - ), - supports_check_mode=True - ) - - if not import_nomad: - module.fail_json(msg=missing_required_lib("python-nomad")) - - certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) - - nomad_client = nomad.Nomad( - host=module.params.get('host'), - secure=module.params.get('use_ssl'), - timeout=module.params.get('timeout'), - verify=module.params.get('validate_certs'), - cert=certificate_ssl, - namespace=module.params.get('namespace'), - token=module.params.get('token') - ) - - changed = False - result = list() - try: - job_list = nomad_client.jobs.get_jobs() - for job in job_list: - result.append(nomad_client.job.get_job(job.get('ID'))) - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('name'): - filter = list() - try: - for job in result: - if job.get('ID') == module.params.get('name'): - filter.append(job) - result = filter - if not filter: - module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name'))) - except Exception as e: - module.fail_json(msg=to_native(e)) - - module.exit_json(changed=changed, result=result) - - -def main(): - - run() - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py b/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py deleted file mode 100644 index 4ec6010f..00000000 --- a/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Mathieu Bultel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: pacemaker_cluster -short_description: Manage pacemaker clusters -author: -- Mathieu Bultel (@matbu) -description: - - This module can manage a pacemaker cluster and nodes from Ansible using - the pacemaker cli. -options: - state: - description: - - Indicate desired state of the cluster - choices: [ cleanup, offline, online, restart ] - type: str - node: - description: - - Specify which node of the cluster you want to manage. None == the - cluster status itself, 'all' == check the status of all nodes. - type: str - timeout: - description: - - Timeout when the module should considered that the action has failed - default: 300 - type: int - force: - description: - - Force the change of the cluster state - type: bool - default: 'yes' -''' -EXAMPLES = ''' ---- -- name: Set cluster Online - hosts: localhost - gather_facts: no - tasks: - - name: Get cluster state - community.general.pacemaker_cluster: - state: online -''' - -RETURN = ''' -changed: - description: True if the cluster state has changed - type: bool - returned: always -out: - description: The output of the current state of the cluster. It return a - list of the nodes state. - type: str - sample: 'out: [[" overcloud-controller-0", " Online"]]}' - returned: always -rc: - description: exit code of the module - type: bool - returned: always -''' - -import time - -from ansible.module_utils.basic import AnsibleModule - - -_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node" - - -def get_cluster_status(module): - cmd = "pcs cluster status" - rc, out, err = module.run_command(cmd) - if out in _PCS_CLUSTER_DOWN: - return 'offline' - else: - return 'online' - - -def get_node_status(module, node='all'): - if node == 'all': - cmd = "pcs cluster pcsd-status %s" % node - else: - cmd = "pcs cluster pcsd-status" - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - status = [] - for o in out.splitlines(): - status.append(o.split(':')) - return status - - -def clean_cluster(module, timeout): - cmd = "pcs resource cleanup" - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - -def set_cluster(module, state, timeout, force): - if state == 'online': - cmd = "pcs cluster start" - if state == 'offline': - cmd = "pcs cluster stop" - if force: - cmd = "%s --force" % cmd - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - t = time.time() - ready = False - while time.time() < t + timeout: - cluster_state = get_cluster_status(module) - if cluster_state == state: - ready = True - break - if not ready: - module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) - - -def set_node(module, state, timeout, force, node='all'): - # map states - if state == 'online': - cmd = "pcs cluster start" - if state == 'offline': - cmd = "pcs cluster stop" - if force: - cmd = "%s --force" % cmd - - nodes_state = get_node_status(module, node) - for node in nodes_state: - if node[1].strip().lower() != state: - cmd = "%s %s" % (cmd, node[0].strip()) - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - t = time.time() - ready = False - while time.time() < t + 
timeout: - nodes_state = get_node_status(module) - for node in nodes_state: - if node[1].strip().lower() == state: - ready = True - break - if not ready: - module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) - - -def main(): - argument_spec = dict( - state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']), - node=dict(type='str'), - timeout=dict(type='int', default=300), - force=dict(type='bool', default=True), - ) - - module = AnsibleModule( - argument_spec, - supports_check_mode=True, - ) - changed = False - state = module.params['state'] - node = module.params['node'] - force = module.params['force'] - timeout = module.params['timeout'] - - if state in ['online', 'offline']: - # Get cluster status - if node is None: - cluster_state = get_cluster_status(module) - if cluster_state == state: - module.exit_json(changed=changed, out=cluster_state) - else: - set_cluster(module, state, timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == state: - module.exit_json(changed=True, out=cluster_state) - else: - module.fail_json(msg="Fail to bring the cluster %s" % state) - else: - cluster_state = get_node_status(module, node) - # Check cluster state - for node_state in cluster_state: - if node_state[1].strip().lower() == state: - module.exit_json(changed=changed, out=cluster_state) - else: - # Set cluster status if needed - set_cluster(module, state, timeout, force) - cluster_state = get_node_status(module, node) - module.exit_json(changed=True, out=cluster_state) - - if state in ['restart']: - set_cluster(module, 'offline', timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == 'offline': - set_cluster(module, 'online', timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == 'online': - module.exit_json(changed=True, out=cluster_state) - else: - module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started") - else: - module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped") - - if state in ['cleanup']: - clean_cluster(module, timeout) - cluster_state = get_cluster_status(module) - module.exit_json(changed=True, - out=cluster_state) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/clustering/znode.py b/ansible_collections/community/general/plugins/modules/clustering/znode.py deleted file mode 100644 index d55a502b..00000000 --- a/ansible_collections/community/general/plugins/modules/clustering/znode.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright 2015 WP Engine, Inc. All rights reserved. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: znode -short_description: Create, delete, retrieve, and update znodes using ZooKeeper -description: - - Create, delete, retrieve, and update znodes using ZooKeeper. -options: - hosts: - description: - - A list of ZooKeeper servers (format '[server]:[port]'). - required: true - type: str - name: - description: - - The path of the znode. - required: true - type: str - value: - description: - - The value assigned to the znode. - type: str - op: - description: - - An operation to perform. Mutually exclusive with state. 
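-    # e.g. op: get returns the znode's value and stat structure, and
-    # op: list returns its children (see EXAMPLES below)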
- choices: [ get, wait, list ] - type: str - state: - description: - - The state to enforce. Mutually exclusive with op. - choices: [ present, absent ] - type: str - timeout: - description: - - The amount of time to wait for a node to appear. - default: 300 - type: int - recursive: - description: - - Recursively delete node and all its children. - type: bool - default: 'no' -requirements: - - kazoo >= 2.1 - - python >= 2.6 -author: "Trey Perry (@treyperry)" -''' - -EXAMPLES = """ -- name: Creating or updating a znode with a given value - community.general.znode: - hosts: 'localhost:2181' - name: /mypath - value: myvalue - state: present - -- name: Getting the value and stat structure for a znode - community.general.znode: - hosts: 'localhost:2181' - name: /mypath - op: get - -- name: Listing a particular znode's children - community.general.znode: - hosts: 'localhost:2181' - name: /zookeeper - op: list - -- name: Waiting 20 seconds for a znode to appear at path /mypath - community.general.znode: - hosts: 'localhost:2181' - name: /mypath - op: wait - timeout: 20 - -- name: Deleting a znode at path /mypath - community.general.znode: - hosts: 'localhost:2181' - name: /mypath - state: absent - -- name: Creating or updating a znode with a given value on a remote Zookeeper - community.general.znode: - hosts: 'my-zookeeper-node:2181' - name: /mypath - value: myvalue - state: present - delegate_to: 127.0.0.1 -""" - -import time -import traceback - -KAZOO_IMP_ERR = None -try: - from kazoo.client import KazooClient - from kazoo.handlers.threading import KazooTimeoutError - KAZOO_INSTALLED = True -except ImportError: - KAZOO_IMP_ERR = traceback.format_exc() - KAZOO_INSTALLED = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_bytes - - -def main(): - module = AnsibleModule( - argument_spec=dict( - hosts=dict(required=True, type='str'), - name=dict(required=True, type='str'), - value=dict(type='str'), - op=dict(choices=['get', 'wait', 'list']), - state=dict(choices=['present', 'absent']), - timeout=dict(default=300, type='int'), - recursive=dict(default=False, type='bool') - ), - supports_check_mode=False - ) - - if not KAZOO_INSTALLED: - module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR) - - check = check_params(module.params) - if not check['success']: - module.fail_json(msg=check['msg']) - - zoo = KazooCommandProxy(module) - try: - zoo.start() - except KazooTimeoutError: - module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.') - - command_dict = { - 'op': { - 'get': zoo.get, - 'list': zoo.list, - 'wait': zoo.wait - }, - 'state': { - 'present': zoo.present, - 'absent': zoo.absent - } - } - - command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state' - method = module.params[command_type] - result, result_dict = command_dict[command_type][method]() - zoo.shutdown() - - if result: - module.exit_json(**result_dict) - else: - module.fail_json(**result_dict) - - -def check_params(params): - if not params['state'] and not params['op']: - return {'success': False, 'msg': 'Please define an operation (op) or a state.'} - - if params['state'] and params['op']: - return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'} - - return {'success': True} - - -class KazooCommandProxy(): - def __init__(self, module): - self.module = module - self.zk = KazooClient(module.params['hosts']) - - def 
absent(self): - return self._absent(self.module.params['name']) - - def exists(self, znode): - return self.zk.exists(znode) - - def list(self): - children = self.zk.get_children(self.module.params['name']) - return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.', - 'znode': self.module.params['name']} - - def present(self): - return self._present(self.module.params['name'], self.module.params['value']) - - def get(self): - return self._get(self.module.params['name']) - - def shutdown(self): - self.zk.stop() - self.zk.close() - - def start(self): - self.zk.start() - - def wait(self): - return self._wait(self.module.params['name'], self.module.params['timeout']) - - def _absent(self, znode): - if self.exists(znode): - self.zk.delete(znode, recursive=self.module.params['recursive']) - return True, {'changed': True, 'msg': 'The znode was deleted.'} - else: - return True, {'changed': False, 'msg': 'The znode does not exist.'} - - def _get(self, path): - if self.exists(path): - value, zstat = self.zk.get(path) - stat_dict = {} - for i in dir(zstat): - if not i.startswith('_'): - attr = getattr(zstat, i) - if isinstance(attr, (int, str)): - stat_dict[i] = attr - result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value, - 'stat': stat_dict} - else: - result = False, {'msg': 'The requested node does not exist.'} - - return result - - def _present(self, path, value): - if self.exists(path): - (current_value, zstat) = self.zk.get(path) - if value != current_value: - self.zk.set(path, to_bytes(value)) - return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path, - 'value': value} - else: - return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value} - else: - self.zk.create(path, to_bytes(value), makepath=True) - return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value} - - def _wait(self, path, timeout, interval=5): - lim = time.time() + timeout - - while time.time() < lim: - if self.exists(path): - return True, {'msg': 'The node appeared before the configured timeout.', - 'znode': path, 'timeout': timeout} - else: - time.sleep(interval) - - return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout, - 'znode': path} - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/cobbler_sync.py b/ansible_collections/community/general/plugins/modules/cobbler_sync.py deleted file mode 120000 index 9c1b6ace..00000000 --- a/ansible_collections/community/general/plugins/modules/cobbler_sync.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/cobbler/cobbler_sync.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/cobbler_system.py b/ansible_collections/community/general/plugins/modules/cobbler_system.py deleted file mode 120000 index d4731356..00000000 --- a/ansible_collections/community/general/plugins/modules/cobbler_system.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/cobbler/cobbler_system.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/composer.py b/ansible_collections/community/general/plugins/modules/composer.py deleted file mode 120000 index 33d194fd..00000000 --- a/ansible_collections/community/general/plugins/modules/composer.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/composer.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/consul.py b/ansible_collections/community/general/plugins/modules/consul.py deleted file mode 120000 index 7f72e9cd..00000000 --- a/ansible_collections/community/general/plugins/modules/consul.py +++ /dev/null @@ -1 +0,0 @@ -clustering/consul/consul.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/consul_acl.py b/ansible_collections/community/general/plugins/modules/consul_acl.py deleted file mode 120000 index d6afb151..00000000 --- a/ansible_collections/community/general/plugins/modules/consul_acl.py +++ /dev/null @@ -1 +0,0 @@ -clustering/consul/consul_acl.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/consul_kv.py b/ansible_collections/community/general/plugins/modules/consul_kv.py deleted file mode 120000 index 4f96db68..00000000 --- a/ansible_collections/community/general/plugins/modules/consul_kv.py +++ /dev/null @@ -1 +0,0 @@ -clustering/consul/consul_kv.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/consul_session.py b/ansible_collections/community/general/plugins/modules/consul_session.py deleted file mode 120000 index e167757a..00000000 --- a/ansible_collections/community/general/plugins/modules/consul_session.py +++ /dev/null @@ -1 +0,0 @@ -clustering/consul/consul_session.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/copr.py b/ansible_collections/community/general/plugins/modules/copr.py deleted file mode 120000 index 6d4f8e2f..00000000 --- a/ansible_collections/community/general/plugins/modules/copr.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/copr.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/cpanm.py b/ansible_collections/community/general/plugins/modules/cpanm.py deleted file mode 120000 index e3dae867..00000000 --- a/ansible_collections/community/general/plugins/modules/cpanm.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/cpanm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/cronvar.py b/ansible_collections/community/general/plugins/modules/cronvar.py deleted file mode 120000 index 148f3ccd..00000000 --- a/ansible_collections/community/general/plugins/modules/cronvar.py +++ /dev/null @@ -1 +0,0 @@ -system/cronvar.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/crypttab.py b/ansible_collections/community/general/plugins/modules/crypttab.py deleted file mode 120000 index 189173f0..00000000 --- a/ansible_collections/community/general/plugins/modules/crypttab.py +++ /dev/null @@ -1 +0,0 @@ -system/crypttab.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py b/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py deleted file mode 100644 index 27b979ad..00000000 --- a/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py +++ /dev/null @@ -1,521 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -"""short_description: Check or wait for migrations between nodes""" - -# Copyright: (c) 2018, Albert Autin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - 
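-# Polling outline: up to tries_limit attempts, sleeping sleep_between_checks
-# seconds apart; success is reported only once consecutive_good_checks
-# consecutive checks have observed no remaining migrations (see has_migs()).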
-DOCUMENTATION = ''' ---- -module: aerospike_migrations -short_description: Check or wait for migrations between nodes -description: - - This can be used to check for migrations in a cluster. - This makes it easy to do a rolling upgrade/update on Aerospike nodes. - - If waiting for migrations is not desired, simply just poll until - port 3000 if available or asinfo -v status returns ok -author: "Albert Autin (@Alb0t)" -options: - host: - description: - - Which host do we use as seed for info connection - required: False - type: str - default: localhost - port: - description: - - Which port to connect to Aerospike on (service port) - required: False - type: int - default: 3000 - connect_timeout: - description: - - How long to try to connect before giving up (milliseconds) - required: False - type: int - default: 1000 - consecutive_good_checks: - description: - - How many times should the cluster report "no migrations" - consecutively before returning OK back to ansible? - required: False - type: int - default: 3 - sleep_between_checks: - description: - - How long to sleep between each check (seconds). - required: False - type: int - default: 60 - tries_limit: - description: - - How many times do we poll before giving up and failing? - default: 300 - required: False - type: int - local_only: - description: - - Do you wish to only check for migrations on the local node - before returning, or do you want all nodes in the cluster - to finish before returning? - required: True - type: bool - min_cluster_size: - description: - - Check will return bad until cluster size is met - or until tries is exhausted - required: False - type: int - default: 1 - fail_on_cluster_change: - description: - - Fail if the cluster key changes - if something else is changing the cluster, we may want to fail - required: False - type: bool - default: True - migrate_tx_key: - description: - - The metric key used to determine if we have tx migrations - remaining. Changeable due to backwards compatibility. - required: False - type: str - default: migrate_tx_partitions_remaining - migrate_rx_key: - description: - - The metric key used to determine if we have rx migrations - remaining. Changeable due to backwards compatibility. - required: False - type: str - default: migrate_rx_partitions_remaining - target_cluster_size: - description: - - When all aerospike builds in the cluster are greater than - version 4.3, then the C(cluster-stable) info command will be used. - Inside this command, you can optionally specify what the target - cluster size is - but it is not necessary. You can still rely on - min_cluster_size if you don't want to use this option. - - If this option is specified on a cluster that has at least 1 - host <4.3 then it will be ignored until the min version reaches - 4.3. 
- required: False - type: int -''' -EXAMPLES = ''' -# check for migrations on local node -- name: Wait for migrations on local node before proceeding - community.general.aerospike_migrations: - host: "localhost" - connect_timeout: 2000 - consecutive_good_checks: 5 - sleep_between_checks: 15 - tries_limit: 600 - local_only: False - -# example playbook: -- name: Upgrade aerospike - hosts: all - become: true - serial: 1 - tasks: - - name: Install dependencies - ansible.builtin.apt: - name: - - python - - python-pip - - python-setuptools - state: latest - - name: Setup aerospike - ansible.builtin.pip: - name: aerospike -# check for migrations every (sleep_between_checks) -# If at least (consecutive_good_checks) checks come back OK in a row, then return OK. -# Will exit if any exception, which can be caused by bad nodes, -# nodes not returning data, or other reasons. -# Maximum runtime before giving up in this case will be: -# Tries Limit * Sleep Between Checks * delay * retries - - name: Wait for aerospike migrations - community.general.aerospike_migrations: - local_only: True - sleep_between_checks: 1 - tries_limit: 5 - consecutive_good_checks: 3 - fail_on_cluster_change: true - min_cluster_size: 3 - target_cluster_size: 4 - register: migrations_check - until: migrations_check is succeeded - changed_when: false - delay: 60 - retries: 120 - - name: Another thing - ansible.builtin.shell: | - echo foo - - name: Reboot - ansible.builtin.reboot: -''' - -RETURN = ''' -# Returns only a success/failure result. Changed is always false. -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -LIB_FOUND_ERR = None -try: - import aerospike - from time import sleep - import re -except ImportError as ie: - LIB_FOUND = False - LIB_FOUND_ERR = traceback.format_exc() -else: - LIB_FOUND = True - - -def run_module(): - """run ansible module""" - module_args = dict( - host=dict(type='str', required=False, default='localhost'), - port=dict(type='int', required=False, default=3000), - connect_timeout=dict(type='int', required=False, default=1000), - consecutive_good_checks=dict(type='int', required=False, default=3), - sleep_between_checks=dict(type='int', required=False, default=60), - tries_limit=dict(type='int', required=False, default=300), - local_only=dict(type='bool', required=True), - min_cluster_size=dict(type='int', required=False, default=1), - target_cluster_size=dict(type='int', required=False, default=None), - fail_on_cluster_change=dict(type='bool', required=False, default=True), - migrate_tx_key=dict(type='str', required=False, no_log=False, - default="migrate_tx_partitions_remaining"), - migrate_rx_key=dict(type='str', required=False, no_log=False, - default="migrate_rx_partitions_remaining") - ) - - result = dict( - changed=False, - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - if not LIB_FOUND: - module.fail_json(msg=missing_required_lib('aerospike'), - exception=LIB_FOUND_ERR) - - try: - if module.check_mode: - has_migrations, skip_reason = False, None - else: - migrations = Migrations(module) - has_migrations, skip_reason = migrations.has_migs( - module.params['local_only'] - ) - - if has_migrations: - module.fail_json(msg="Failed.", skip_reason=skip_reason) - except Exception as e: - module.fail_json(msg="Error: {0}".format(e)) - - module.exit_json(**result) - - -class Migrations: - """ Check or wait for migrations between nodes """ - - def __init__(self, module): - self.module = module - 
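-        # Connect once, then snapshot nodes, statistics, namespaces and build
-        # versions; has_migs() refreshes the volatile node/statistics data on
-        # every retry.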
self._client = self._create_client().connect() - self._nodes = {} - self._update_nodes_list() - self._cluster_statistics = {} - self._update_cluster_statistics() - self._namespaces = set() - self._update_cluster_namespace_list() - self._build_list = set() - self._update_build_list() - self._start_cluster_key = \ - self._cluster_statistics[self._nodes[0]]['cluster_key'] - - def _create_client(self): - """ TODO: add support for auth, tls, and other special features - I won't use those features, so I'll wait until somebody complains - or does it for me (Cross fingers) - create the client object""" - config = { - 'hosts': [ - (self.module.params['host'], self.module.params['port']) - ], - 'policies': { - 'timeout': self.module.params['connect_timeout'] - } - } - return aerospike.client(config) - - def _info_cmd_helper(self, cmd, node=None, delimiter=';'): - """delimiter is for separate stats that come back, NOT for kv - separation which is =""" - if node is None: # If no node passed, use the first one (local) - node = self._nodes[0] - data = self._client.info_node(cmd, node) - data = data.split("\t") - if len(data) != 1 and len(data) != 2: - self.module.fail_json( - msg="Unexpected number of values returned in info command: " + - str(len(data)) - ) - # data will be in format 'command\touput' - data = data[-1] - data = data.rstrip("\n\r") - data_arr = data.split(delimiter) - - # some commands don't return in kv format - # so we dont want a dict from those. - if '=' in data: - retval = dict( - metric.split("=", 1) for metric in data_arr - ) - else: - # if only 1 element found, and not kv, return just the value. - if len(data_arr) == 1: - retval = data_arr[0] - else: - retval = data_arr - return retval - - def _update_build_list(self): - """creates self._build_list which is a unique list - of build versions.""" - self._build_list = set() - for node in self._nodes: - build = self._info_cmd_helper('build', node) - self._build_list.add(build) - - # just checks to see if the version is 4.3 or greater - def _can_use_cluster_stable(self): - # if version <4.3 we can't use cluster-stable info cmd - # regex hack to check for versions beginning with 0-3 or - # beginning with 4.0,4.1,4.2 - if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)): - return False - return True - - def _update_cluster_namespace_list(self): - """ make a unique list of namespaces - TODO: does this work on a rolling namespace add/deletion? - thankfully if it doesn't, we dont need this on builds >=4.3""" - self._namespaces = set() - for node in self._nodes: - namespaces = self._info_cmd_helper('namespaces', node) - for namespace in namespaces: - self._namespaces.add(namespace) - - def _update_cluster_statistics(self): - """create a dict of nodes with their related stats """ - self._cluster_statistics = {} - for node in self._nodes: - self._cluster_statistics[node] = \ - self._info_cmd_helper('statistics', node) - - def _update_nodes_list(self): - """get a fresh list of all the nodes""" - self._nodes = self._client.get_nodes() - if not self._nodes: - self.module.fail_json("Failed to retrieve at least 1 node.") - - def _namespace_has_migs(self, namespace, node=None): - """returns a True or False. - Does the namespace have migrations for the node passed? 
- If no node passed, uses the local node or the first one in the list""" - namespace_stats = self._info_cmd_helper("namespace/" + namespace, node) - try: - namespace_tx = \ - int(namespace_stats[self.module.params['migrate_tx_key']]) - namespace_rx = \ - int(namespace_stats[self.module.params['migrate_rx_key']]) - except KeyError: - self.module.fail_json( - msg="Did not find partition remaining key:" + - self.module.params['migrate_tx_key'] + - " or key:" + - self.module.params['migrate_rx_key'] + - " in 'namespace/" + - namespace + - "' output." - ) - except TypeError: - self.module.fail_json( - msg="namespace stat returned was not numerical" - ) - return namespace_tx != 0 or namespace_rx != 0 - - def _node_has_migs(self, node=None): - """just calls namespace_has_migs and - if any namespace has migs returns true""" - migs = 0 - self._update_cluster_namespace_list() - for namespace in self._namespaces: - if self._namespace_has_migs(namespace, node): - migs += 1 - return migs != 0 - - def _cluster_key_consistent(self): - """create a dictionary to store what each node - returns the cluster key as. we should end up with only 1 dict key, - with the key being the cluster key.""" - cluster_keys = {} - for node in self._nodes: - cluster_key = self._cluster_statistics[node][ - 'cluster_key'] - if cluster_key not in cluster_keys: - cluster_keys[cluster_key] = 1 - else: - cluster_keys[cluster_key] += 1 - if len(cluster_keys.keys()) == 1 and \ - self._start_cluster_key in cluster_keys: - return True - return False - - def _cluster_migrates_allowed(self): - """ensure all nodes have 'migrate_allowed' in their stats output""" - for node in self._nodes: - node_stats = self._info_cmd_helper('statistics', node) - allowed = node_stats['migrate_allowed'] - if allowed == "false": - return False - return True - - def _cluster_has_migs(self): - """calls node_has_migs for each node""" - migs = 0 - for node in self._nodes: - if self._node_has_migs(node): - migs += 1 - if migs == 0: - return False - return True - - def _has_migs(self, local): - if local: - return self._local_node_has_migs() - return self._cluster_has_migs() - - def _local_node_has_migs(self): - return self._node_has_migs(None) - - def _is_min_cluster_size(self): - """checks that all nodes in the cluster are returning the - minimum cluster size specified in their statistics output""" - sizes = set() - for node in self._cluster_statistics: - sizes.add(int(self._cluster_statistics[node]['cluster_size'])) - - if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no - return False - if (min(sizes)) >= self.module.params['min_cluster_size']: - return True - return False - - def _cluster_stable(self): - """Added 4.3: - cluster-stable:size=;ignore-migrations=;namespace= - Returns the current 'cluster_key' when the following are satisfied: - - If 'size' is specified then the target node's 'cluster-size' - must match size. 
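-        (For example, 'cluster-stable:size=4' only yields a single shared
-        cluster key once every node reports a cluster size of four.)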
- If 'ignore-migrations' is either unspecified or 'false' then - the target node's migrations counts must be zero for the provided - 'namespace' or all namespaces if 'namespace' is not provided.""" - cluster_key = set() - cluster_key.add(self._info_cmd_helper('statistics')['cluster_key']) - cmd = "cluster-stable:" - target_cluster_size = self.module.params['target_cluster_size'] - if target_cluster_size is not None: - cmd = cmd + "size=" + str(target_cluster_size) + ";" - for node in self._nodes: - try: - cluster_key.add(self._info_cmd_helper(cmd, node)) - except aerospike.exception.ServerError as e: # unstable-cluster is returned in form of Exception - if 'unstable-cluster' in e.msg: - return False - raise e - if len(cluster_key) == 1: - return True - return False - - def _cluster_good_state(self): - """checks a few things to make sure we're OK to say the cluster - has no migs. It could be in a unhealthy condition that does not allow - migs, or a split brain""" - if self._cluster_key_consistent() is not True: - return False, "Cluster key inconsistent." - if self._is_min_cluster_size() is not True: - return False, "Cluster min size not reached." - if self._cluster_migrates_allowed() is not True: - return False, "migrate_allowed is false somewhere." - return True, "OK." - - def has_migs(self, local=True): - """returns a boolean, False if no migrations otherwise True""" - consecutive_good = 0 - try_num = 0 - skip_reason = list() - while \ - try_num < int(self.module.params['tries_limit']) and \ - consecutive_good < \ - int(self.module.params['consecutive_good_checks']): - - self._update_nodes_list() - self._update_cluster_statistics() - - # These checks are outside of the while loop because - # we probably want to skip & sleep instead of failing entirely - stable, reason = self._cluster_good_state() - if stable is not True: - skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + reason - ) - else: - if self._can_use_cluster_stable(): - if self._cluster_stable(): - consecutive_good += 1 - else: - consecutive_good = 0 - skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + " cluster_stable" - ) - elif self._has_migs(local): - # print("_has_migs") - skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + " migrations" - ) - consecutive_good = 0 - else: - consecutive_good += 1 - if consecutive_good == self.module.params[ - 'consecutive_good_checks']: - break - try_num += 1 - sleep(self.module.params['sleep_between_checks']) - # print(skip_reason) - if consecutive_good == self.module.params['consecutive_good_checks']: - return False, None - return True, skip_reason - - -def main(): - """main method for ansible module""" - run_module() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py deleted file mode 100644 index 6601b301..00000000 --- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Kamil Szczygiel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: influxdb_database -short_description: Manage InfluxDB databases -description: 
- - Manage InfluxDB databases. -author: "Kamil Szczygiel (@kamsz)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" - - requests -options: - database_name: - description: - - Name of the database. - required: true - type: str - state: - description: - - Determines if the database should be created or destroyed. - choices: [ absent, present ] - default: present - type: str -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -# Example influxdb_database command from Ansible Playbooks -- name: Create database - community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - -- name: Destroy database - community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - state: absent - -- name: Create database using custom credentials - community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - username: "{{influxdb_username}}" - password: "{{influxdb_password}}" - database_name: "{{influxdb_database_name}}" - ssl: yes - validate_certs: yes -''' - -RETURN = r''' -# only defaults -''' - -try: - import requests.exceptions - from influxdb import exceptions -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb - - -def find_database(module, client, database_name): - database = None - - try: - databases = client.get_list_database() - for db in databases: - if db['name'] == database_name: - database = db - break - except requests.exceptions.ConnectionError as e: - module.fail_json(msg=str(e)) - return database - - -def create_database(module, client, database_name): - if not module.check_mode: - try: - client.create_database(database_name) - except requests.exceptions.ConnectionError as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=True) - - -def drop_database(module, client, database_name): - if not module.check_mode: - try: - client.drop_database(database_name) - except exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - - module.exit_json(changed=True) - - -def main(): - argument_spec = InfluxDb.influxdb_argument_spec() - argument_spec.update( - database_name=dict(required=True, type='str'), - state=dict(default='present', type='str', choices=['present', 'absent']) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - state = module.params['state'] - - influxdb = InfluxDb(module) - client = influxdb.connect_to_influxdb() - database_name = influxdb.database_name - database = find_database(module, client, database_name) - - if state == 'present': - if database: - module.exit_json(changed=False) - else: - create_database(module, client, database_name) - - if state == 'absent': - if database: - drop_database(module, client, database_name) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py deleted file mode 100644 index bff6fa98..00000000 --- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017, René Moser -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: influxdb_query -short_description: Query data points from InfluxDB -description: - - Query data points from InfluxDB. -author: "René Moser (@resmo)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" -options: - query: - description: - - Query to be executed. - required: true - type: str - database_name: - description: - - Name of the database. - required: true - type: str -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -- name: Query connections - community.general.influxdb_query: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - query: "select mean(value) from connections" - register: connection - -- name: Query connections with tags filters - community.general.influxdb_query: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - query: "select mean(value) from connections where region='zue01' and host='server01'" - register: connection - -- name: Print results from the query - ansible.builtin.debug: - var: connection.query_results -''' - -RETURN = r''' -query_results: - description: Result from the query - returned: success - type: list - sample: - - mean: 1245.5333333333333 - time: "1970-01-01T00:00:00Z" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb - - -class AnsibleInfluxDBRead(InfluxDb): - - def read_by_query(self, query): - client = self.connect_to_influxdb() - try: - rs = client.query(query) - if rs: - return list(rs.get_points()) - except Exception as e: - self.module.fail_json(msg=to_native(e)) - - -def main(): - argument_spec = InfluxDb.influxdb_argument_spec() - argument_spec.update( - query=dict(type='str', required=True), - database_name=dict(required=True, type='str'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - influx = AnsibleInfluxDBRead(module) - query = module.params.get('query') - results = influx.read_by_query(query) - module.exit_json(changed=True, query_results=results) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py deleted file mode 100644 index 6cb45229..00000000 --- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ /dev/null @@ -1,343 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Kamil Szczygiel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: influxdb_retention_policy -short_description: Manage InfluxDB retention policies -description: - - Manage InfluxDB retention policies. -author: "Kamil Szczygiel (@kamsz)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" - - requests -options: - database_name: - description: - - Name of the database. - required: true - type: str - policy_name: - description: - - Name of the retention policy. 
- required: true - type: str - state: - description: - - State of the retention policy. - choices: [ absent, present ] - default: present - type: str - version_added: 3.1.0 - duration: - description: - - Determines how long InfluxDB should keep the data. If specified, it - should be C(INF) or at least one hour. If not specified, C(INF) is - assumed. Supports complex duration expressions with multiple units. - - Required only if I(state) is set to C(present). - type: str - replication: - description: - - Determines how many independent copies of each point are stored in the cluster. - - Required only if I(state) is set to C(present). - type: int - default: - description: - - Sets the retention policy as default retention policy. - type: bool - default: false - shard_group_duration: - description: - - Determines the time range covered by a shard group. If specified it - must be at least one hour. If none, it's determined by InfluxDB by - the rentention policy's duration. Supports complex duration expressions - with multiple units. - type: str - version_added: '2.0.0' -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -# Example influxdb_retention_policy command from Ansible Playbooks -- name: Create 1 hour retention policy - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1h - replication: 1 - ssl: yes - validate_certs: yes - state: present - -- name: Create 1 day retention policy with 1 hour shard group duration - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1d - replication: 1 - shard_group_duration: 1h - state: present - -- name: Create 1 week retention policy with 1 day shard group duration - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1w - replication: 1 - shard_group_duration: 1d - state: present - -- name: Create infinite retention policy with 1 week of shard group duration - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: INF - replication: 1 - ssl: no - validate_certs: no - shard_group_duration: 1w - state: present - -- name: Create retention policy with complex durations - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 5d1h30m - replication: 1 - ssl: no - validate_certs: no - shard_group_duration: 1d10h30m - state: present - -- name: Drop retention policy - community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - state: absent -''' - -RETURN = r''' -# only defaults -''' - -import re - -try: - import requests.exceptions - from influxdb import exceptions -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb -from ansible.module_utils.common.text.converters import to_native - - -VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') - -DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') -EXTENDED_DURATION_REGEX = 
re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') - -DURATION_UNIT_NANOSECS = { - 'ns': 1, - 'u': 1000, - 'µ': 1000, - 'ms': 1000 * 1000, - 's': 1000 * 1000 * 1000, - 'm': 1000 * 1000 * 1000 * 60, - 'h': 1000 * 1000 * 1000 * 60 * 60, - 'd': 1000 * 1000 * 1000 * 60 * 60 * 24, - 'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7, -} - -MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] -MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] - - -def check_duration_literal(value): - return VALID_DURATION_REGEX.search(value) is not None - - -def parse_duration_literal(value, extended=False): - duration = 0.0 - - if value == "INF": - return duration - - lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value) - - for duration_literal in lookup: - filtered_literal = list(filter(None, duration_literal)) - duration_val = float(filtered_literal[0]) - duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]] - - return duration - - -def find_retention_policy(module, client): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - hostname = module.params['hostname'] - retention_policy = None - - try: - retention_policies = client.get_list_retention_policies(database=database_name) - for policy in retention_policies: - if policy['name'] == policy_name: - retention_policy = policy - break - except requests.exceptions.ConnectionError as e: - module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e))) - - if retention_policy is not None: - retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True) - retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True) - - return retention_policy - - -def create_retention_policy(module, client): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - duration = module.params['duration'] - replication = module.params['replication'] - default = module.params['default'] - shard_group_duration = module.params['shard_group_duration'] - - if not check_duration_literal(duration): - module.fail_json(msg="Failed to parse value of duration") - - influxdb_duration_format = parse_duration_literal(duration) - if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: - module.fail_json(msg="duration value must be at least 1h") - - if shard_group_duration is not None: - if not check_duration_literal(shard_group_duration): - module.fail_json(msg="Failed to parse value of shard_group_duration") - - influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) - if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: - module.fail_json(msg="shard_group_duration value must be finite and at least 1h") - - if not module.check_mode: - try: - if shard_group_duration: - client.create_retention_policy(policy_name, duration, replication, database_name, default, - shard_group_duration) - else: - client.create_retention_policy(policy_name, duration, replication, database_name, default) - except exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - module.exit_json(changed=True) - - -def alter_retention_policy(module, client, retention_policy): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - duration = module.params['duration'] - replication = 
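
[Editor's aside] The duration handling deleted above turns InfluxDB duration literals such as C(5d1h30m) into nanoseconds by summing per-unit multipliers from DURATION_UNIT_NANOSECS. Below is a minimal, self-contained sketch of that conversion, re-implemented for illustration (the helper name to_nanoseconds and the trimmed unit table are mine, not the module's):

import re

# Unit multipliers in nanoseconds, mirroring DURATION_UNIT_NANOSECS above
# (the 'u'/'µ' microsecond aliases are collapsed to 'u' here for brevity).
UNITS = {'ns': 1, 'u': 10**3, 'ms': 10**6, 's': 10**9,
         'm': 60 * 10**9, 'h': 3600 * 10**9,
         'd': 86400 * 10**9, 'w': 604800 * 10**9}
TOKEN = re.compile(r'(\d+)(ns|u|ms|s|m|h|d|w)')

def to_nanoseconds(literal):
    # 'INF' is represented as 0, matching parse_duration_literal() above.
    if literal == 'INF':
        return 0
    # A complex literal is simply the sum of its components.
    return sum(int(n) * UNITS[u] for n, u in TOKEN.findall(literal))

assert to_nanoseconds('1h') == 3600 * 10**9
assert to_nanoseconds('5d1h30m') == (5 * 86400 + 3600 + 30 * 60) * 10**9

This is also why the module can compare a user-supplied literal against the server-reported duration: both sides are normalized to the same integer unit first.
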
module.params['replication']
-    default = module.params['default']
-    shard_group_duration = module.params['shard_group_duration']
-
-    changed = False
-
-    if not check_duration_literal(duration):
-        module.fail_json(msg="Failed to parse value of duration")
-
-    influxdb_duration_format = parse_duration_literal(duration)
-    if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION:
-        module.fail_json(msg="duration value must be at least 1h")
-
-    if shard_group_duration is None:
-        influxdb_shard_group_duration_format = retention_policy["shardGroupDuration"]
-    else:
-        if not check_duration_literal(shard_group_duration):
-            module.fail_json(msg="Failed to parse value of shard_group_duration")
-
-        influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
-        if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION:
-            module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
-
-    if (retention_policy['duration'] != influxdb_duration_format or
-            retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or
-            retention_policy['replicaN'] != int(replication) or
-            retention_policy['default'] != default):
-        if not module.check_mode:
-            try:
-                client.alter_retention_policy(policy_name, database_name, duration, replication, default,
-                                              shard_group_duration)
-            except exceptions.InfluxDBClientError as e:
-                module.fail_json(msg=e.content)
-        changed = True
-    module.exit_json(changed=changed)
-
-
-def drop_retention_policy(module, client):
-    database_name = module.params['database_name']
-    policy_name = module.params['policy_name']
-
-    if not module.check_mode:
-        try:
-            client.drop_retention_policy(policy_name, database_name)
-        except exceptions.InfluxDBClientError as e:
-            module.fail_json(msg=e.content)
-    module.exit_json(changed=True)
-
-
-def main():
-    argument_spec = InfluxDb.influxdb_argument_spec()
-    argument_spec.update(
-        state=dict(default='present', type='str', choices=['present', 'absent']),
-        database_name=dict(required=True, type='str'),
-        policy_name=dict(required=True, type='str'),
-        duration=dict(type='str'),
-        replication=dict(type='int'),
-        default=dict(default=False, type='bool'),
-        shard_group_duration=dict(type='str'),
-    )
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        supports_check_mode=True,
-        required_if=(
-            ('state', 'present', ['duration', 'replication']),
-        ),
-    )
-
-    state = module.params['state']
-
-    influxdb = InfluxDb(module)
-    client = influxdb.connect_to_influxdb()
-
-    retention_policy = find_retention_policy(module, client)
-
-    if state == 'present':
-        if retention_policy:
-            alter_retention_policy(module, client, retention_policy)
-        else:
-            create_retention_policy(module, client)
-
-    if state == 'absent':
-        if retention_policy:
-            drop_retention_policy(module, client)
-        else:
-            module.exit_json(changed=False)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py
deleted file mode 100644
index 76524d86..00000000
--- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py
+++ /dev/null
@@ -1,285 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Vitaliy Zhhuta
-# inspired by Kamil Szczygiel influxdb_database module
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from
__future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: influxdb_user -short_description: Manage InfluxDB users -description: - - Manage InfluxDB users. -author: "Vitaliy Zhhuta (@zhhuta)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" -options: - user_name: - description: - - Name of the user. - required: True - type: str - user_password: - description: - - Password to be set for the user. - required: false - type: str - admin: - description: - - Whether the user should be in the admin role or not. - - Since version 2.8, the role will also be updated. - default: no - type: bool - state: - description: - - State of the user. - choices: [ absent, present ] - default: present - type: str - grants: - description: - - Privileges to grant to this user. - - Takes a list of dicts containing the "database" and "privilege" keys. - - If this argument is not provided, the current grants will be left alone. - - If an empty list is provided, all grants for the user will be removed. - type: list - elements: dict -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -- name: Create a user on localhost using default login credentials - community.general.influxdb_user: - user_name: john - user_password: s3cr3t - -- name: Create a user on localhost using custom login credentials - community.general.influxdb_user: - user_name: john - user_password: s3cr3t - login_username: "{{ influxdb_username }}" - login_password: "{{ influxdb_password }}" - -- name: Create an admin user on a remote host using custom login credentials - community.general.influxdb_user: - user_name: john - user_password: s3cr3t - admin: yes - hostname: "{{ influxdb_hostname }}" - login_username: "{{ influxdb_username }}" - login_password: "{{ influxdb_password }}" - -- name: Create a user on localhost with privileges - community.general.influxdb_user: - user_name: john - user_password: s3cr3t - login_username: "{{ influxdb_username }}" - login_password: "{{ influxdb_password }}" - grants: - - database: 'collectd' - privilege: 'WRITE' - - database: 'graphite' - privilege: 'READ' - -- name: Destroy a user using custom login credentials - community.general.influxdb_user: - user_name: john - login_username: "{{ influxdb_username }}" - login_password: "{{ influxdb_password }}" - state: absent -''' - -RETURN = r''' -#only defaults -''' - -import json - -from ansible.module_utils.urls import ConnectionError -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -import ansible_collections.community.general.plugins.module_utils.influxdb as influx - - -def find_user(module, client, user_name): - user_result = None - - try: - users = client.get_list_users() - for user in users: - if user['user'] == user_name: - user_result = user - break - except ConnectionError as e: - module.fail_json(msg=to_native(e)) - return user_result - - -def check_user_password(module, client, user_name, user_password): - try: - client.switch_user(user_name, user_password) - client.get_list_users() - except influx.exceptions.InfluxDBClientError as e: - if e.code == 401: - return False - except ConnectionError as e: - module.fail_json(msg=to_native(e)) - finally: - # restore previous user - client.switch_user(module.params['username'], module.params['password']) - return True - - -def set_user_password(module, client, user_name, user_password): - if not module.check_mode: - try: - 
client.set_user_password(user_name, user_password) - except ConnectionError as e: - module.fail_json(msg=to_native(e)) - - -def create_user(module, client, user_name, user_password, admin): - if not module.check_mode: - try: - client.create_user(user_name, user_password, admin) - except ConnectionError as e: - module.fail_json(msg=to_native(e)) - - -def drop_user(module, client, user_name): - if not module.check_mode: - try: - client.drop_user(user_name) - except influx.exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - - module.exit_json(changed=True) - - -def set_user_grants(module, client, user_name, grants): - changed = False - - try: - current_grants = client.get_list_privileges(user_name) - parsed_grants = [] - # Fix privileges wording - for i, v in enumerate(current_grants): - if v['privilege'] != 'NO PRIVILEGES': - if v['privilege'] == 'ALL PRIVILEGES': - v['privilege'] = 'ALL' - parsed_grants.append(v) - - # check if the current grants are included in the desired ones - for current_grant in parsed_grants: - if current_grant not in grants: - if not module.check_mode: - client.revoke_privilege(current_grant['privilege'], - current_grant['database'], - user_name) - changed = True - - # check if the desired grants are included in the current ones - for grant in grants: - if grant not in parsed_grants: - if not module.check_mode: - client.grant_privilege(grant['privilege'], - grant['database'], - user_name) - changed = True - - except influx.exceptions.InfluxDBClientError as e: - module.fail_json(msg=e.content) - - return changed - - -INFLUX_AUTH_FIRST_USER_REQUIRED = "error authorizing query: create admin user first or disable authentication" - - -def main(): - argument_spec = influx.InfluxDb.influxdb_argument_spec() - argument_spec.update( - state=dict(default='present', type='str', choices=['present', 'absent']), - user_name=dict(required=True, type='str'), - user_password=dict(required=False, type='str', no_log=True), - admin=dict(default='False', type='bool'), - grants=dict(type='list', elements='dict'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - state = module.params['state'] - user_name = module.params['user_name'] - user_password = module.params['user_password'] - admin = module.params['admin'] - grants = module.params['grants'] - influxdb = influx.InfluxDb(module) - client = influxdb.connect_to_influxdb() - - user = None - try: - user = find_user(module, client, user_name) - except influx.exceptions.InfluxDBClientError as e: - if e.code == 403: - reason = None - try: - msg = json.loads(e.content) - reason = msg["error"] - except (KeyError, ValueError): - module.fail_json(msg=to_native(e)) - - if reason != INFLUX_AUTH_FIRST_USER_REQUIRED: - module.fail_json(msg=to_native(e)) - else: - module.fail_json(msg=to_native(e)) - - changed = False - - if state == 'present': - if user: - if not check_user_password(module, client, user_name, user_password) and user_password is not None: - set_user_password(module, client, user_name, user_password) - changed = True - - try: - if admin and not user['admin']: - if not module.check_mode: - client.grant_admin_privileges(user_name) - changed = True - elif not admin and user['admin']: - if not module.check_mode: - client.revoke_admin_privileges(user_name) - changed = True - except influx.exceptions.InfluxDBClientError as e: - module.fail_json(msg=to_native(e)) - - else: - user_password = user_password or '' - create_user(module, client, user_name, user_password, admin) - 
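
[Editor's aside] set_user_grants() above reconciles privileges declaratively: any grant that is present but not desired is revoked, any grant that is desired but not present is granted. A hedged, standalone sketch of that reconciliation on plain data, with no InfluxDB connection (the helper name reconcile_grants is mine):

def reconcile_grants(current, desired):
    """Return (to_revoke, to_grant) lists of {'database', 'privilege'} dicts."""
    # Normalize like the module does: map 'ALL PRIVILEGES' to 'ALL'
    # and drop 'NO PRIVILEGES' entries entirely.
    normalized = []
    for grant in current:
        if grant['privilege'] == 'NO PRIVILEGES':
            continue
        if grant['privilege'] == 'ALL PRIVILEGES':
            grant = dict(grant, privilege='ALL')
        normalized.append(grant)
    to_revoke = [g for g in normalized if g not in desired]
    to_grant = [g for g in desired if g not in normalized]
    return to_revoke, to_grant

current = [{'database': 'collectd', 'privilege': 'ALL PRIVILEGES'}]
desired = [{'database': 'collectd', 'privilege': 'WRITE'}]
# Revokes ALL on collectd, grants WRITE on collectd:
print(reconcile_grants(current, desired))
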
changed = True - - if grants is not None: - if set_user_grants(module, client, user_name, grants): - changed = True - - module.exit_json(changed=changed) - - if state == 'absent': - if user: - drop_user(module, client, user_name) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py deleted file mode 100644 index e34fe9c2..00000000 --- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: influxdb_write -short_description: Write data points into InfluxDB -description: - - Write data points into InfluxDB. -author: "René Moser (@resmo)" -requirements: - - "python >= 2.6" - - "influxdb >= 0.9" -options: - data_points: - description: - - Data points as dict to write into the database. - required: true - type: list - elements: dict - database_name: - description: - - Name of the database. - required: true - type: str -extends_documentation_fragment: -- community.general.influxdb - -''' - -EXAMPLES = r''' -- name: Write points into database - community.general.influxdb_write: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - data_points: - - measurement: connections - tags: - host: server01 - region: us-west - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 2000 - - measurement: connections - tags: - host: server02 - region: us-east - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 3000 -''' - -RETURN = r''' -# only defaults -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb - - -class AnsibleInfluxDBWrite(InfluxDb): - - def write_data_point(self, data_points): - client = self.connect_to_influxdb() - - try: - client.write_points(data_points) - except Exception as e: - self.module.fail_json(msg=to_native(e)) - - -def main(): - argument_spec = InfluxDb.influxdb_argument_spec() - argument_spec.update( - data_points=dict(required=True, type='list', elements='dict'), - database_name=dict(required=True, type='str'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - ) - - influx = AnsibleInfluxDBWrite(module) - data_points = module.params.get('data_points') - influx.write_data_point(data_points) - module.exit_json(changed=True) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py b/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py deleted file mode 100644 index bc7df931..00000000 --- a/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2015, Mathew Davies -# (c) 2017, Sam Doran -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, 
print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: elasticsearch_plugin -short_description: Manage Elasticsearch plugins -description: - - Manages Elasticsearch plugins. -author: - - Mathew Davies (@ThePixelDeveloper) - - Sam Doran (@samdoran) -options: - name: - description: - - Name of the plugin to install. - required: True - type: str - state: - description: - - Desired state of a plugin. - choices: ["present", "absent"] - default: present - type: str - src: - description: - - Optionally set the source location to retrieve the plugin from. This can be a file:// - URL to install from a local file, or a remote URL. If this is not set, the plugin - location is just based on the name. - - The name parameter must match the descriptor in the plugin ZIP specified. - - Is only used if the state would change, which is solely checked based on the name - parameter. If, for example, the plugin is already installed, changing this has no - effect. - - For ES 1.x use url. - required: False - type: str - url: - description: - - Set exact URL to download the plugin from (Only works for ES 1.x). - - For ES 2.x and higher, use src. - required: False - type: str - timeout: - description: - - "Timeout setting: 30s, 1m, 1h..." - - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. - default: 1m - type: str - force: - description: - - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails." - default: False - type: bool - plugin_bin: - description: - - Location of the plugin binary. If this file is not found, the default plugin binaries will be used. - - The default changed in Ansible 2.4 to None. - type: path - plugin_dir: - description: - - Your configured plugin directory specified in Elasticsearch - default: /usr/share/elasticsearch/plugins/ - type: path - proxy_host: - description: - - Proxy host to use during plugin installation - type: str - proxy_port: - description: - - Proxy port to use during plugin installation - type: str - version: - description: - - Version of the plugin to be installed. 
- If plugin exists with previous version, it will NOT be updated - type: str -''' - -EXAMPLES = ''' -- name: Install Elasticsearch Head plugin in Elasticsearch 2.x - community.general.elasticsearch_plugin: - name: mobz/elasticsearch-head - state: present - -- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x - community.general.elasticsearch_plugin: - name: mobz/elasticsearch-head - version: 2.0.0 - -- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x - community.general.elasticsearch_plugin: - name: mobz/elasticsearch-head - state: absent - -- name: Install a specific plugin in Elasticsearch >= 5.0 - community.general.elasticsearch_plugin: - name: analysis-icu - state: present - -- name: Install the ingest-geoip plugin with a forced installation - community.general.elasticsearch_plugin: - name: ingest-geoip - state: present - force: yes -''' - -import os - -from ansible.module_utils.basic import AnsibleModule - - -PACKAGE_STATE_MAP = dict( - present="install", - absent="remove" -) - -PLUGIN_BIN_PATHS = tuple([ - '/usr/share/elasticsearch/bin/elasticsearch-plugin', - '/usr/share/elasticsearch/bin/plugin' -]) - - -def parse_plugin_repo(string): - elements = string.split("/") - - # We first consider the simplest form: pluginname - repo = elements[0] - - # We consider the form: username/pluginname - if len(elements) > 1: - repo = elements[1] - - # remove elasticsearch- prefix - # remove es- prefix - for string in ("elasticsearch-", "es-"): - if repo.startswith(string): - return repo[len(string):] - - return repo - - -def is_plugin_present(plugin_name, plugin_dir): - return os.path.isdir(os.path.join(plugin_dir, plugin_name)) - - -def parse_error(string): - reason = "ERROR: " - try: - return string[string.index(reason) + len(reason):].strip() - except ValueError: - return string - - -def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]] - is_old_command = (os.path.basename(plugin_bin) == 'plugin') - - # Timeout and version are only valid for plugin, not elasticsearch-plugin - if is_old_command: - if timeout: - cmd_args.append("--timeout %s" % timeout) - - if version: - plugin_name = plugin_name + '/' + version - cmd_args[2] = plugin_name - - if proxy_host and proxy_port: - cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) - - # Legacy ES 1.x - if url: - cmd_args.append("--url %s" % url) - - if force: - cmd_args.append("--batch") - if src: - cmd_args.append(src) - else: - cmd_args.append(plugin_name) - - cmd = " ".join(cmd_args) - - if module.check_mode: - rc, out, err = 0, "check mode", "" - else: - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err) - - return True, cmd, out, err - - -def remove_plugin(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] - - cmd = " ".join(cmd_args) - - if module.check_mode: - rc, out, err = 0, "check mode", "" - else: - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err) - - return True, cmd, out, err - - -def get_plugin_bin(module, plugin_bin=None): - # Use the plugin_bin that was supplied first before trying other options - valid_plugin_bin = None - if plugin_bin and 
os.path.isfile(plugin_bin): - valid_plugin_bin = plugin_bin - - else: - # Add the plugin_bin passed into the module to the top of the list of paths to test, - # testing for that binary name first before falling back to the default paths. - bin_paths = list(PLUGIN_BIN_PATHS) - if plugin_bin and plugin_bin not in bin_paths: - bin_paths.insert(0, plugin_bin) - - # Get separate lists of dirs and binary names from the full paths to the - # plugin binaries. - plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths])) - plugin_bins = list(set([os.path.basename(x) for x in bin_paths])) - - # Check for the binary names in the default system paths as well as the path - # specified in the module arguments. - for bin_file in plugin_bins: - valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs) - if valid_plugin_bin: - break - - if not valid_plugin_bin: - module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin) - - return valid_plugin_bin - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - src=dict(default=None), - url=dict(default=None), - timeout=dict(default="1m"), - force=dict(type='bool', default=False), - plugin_bin=dict(type="path"), - plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), - proxy_host=dict(default=None), - proxy_port=dict(default=None), - version=dict(default=None) - ), - mutually_exclusive=[("src", "url")], - supports_check_mode=True - ) - - name = module.params["name"] - state = module.params["state"] - url = module.params["url"] - src = module.params["src"] - timeout = module.params["timeout"] - force = module.params["force"] - plugin_bin = module.params["plugin_bin"] - plugin_dir = module.params["plugin_dir"] - proxy_host = module.params["proxy_host"] - proxy_port = module.params["proxy_port"] - version = module.params["version"] - - # Search provided path and system paths for valid binary - plugin_bin = get_plugin_bin(module, plugin_bin) - - repo = parse_plugin_repo(name) - present = is_plugin_present(repo, plugin_dir) - - # skip if the state is correct - if (present and state == "present") or (state == "absent" and not present): - module.exit_json(changed=False, name=name, state=state) - - if state == "present": - changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force) - - elif state == "absent": - changed, cmd, out, err = remove_plugin(module, plugin_bin, name) - - module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py b/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py deleted file mode 100644 index db5091e4..00000000 --- a/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py +++ /dev/null @@ -1,278 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2016, Thierno IB. BARRY @barryib -# Sponsored by Polyconseil http://polyconseil.fr. 
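
[Editor's aside] get_plugin_bin() in elasticsearch_plugin above splits the known binary paths into candidate directories and candidate names, so a user-supplied plugin_bin is tried first and the stock locations act as a fallback. A simplified sketch of that search order, with module.get_bin_path() replaced by a plain filesystem check for illustration (find_plugin_bin is an illustrative name):

import os

DEFAULT_BINS = (
    '/usr/share/elasticsearch/bin/elasticsearch-plugin',
    '/usr/share/elasticsearch/bin/plugin',
)

def find_plugin_bin(user_bin=None):
    candidates = list(DEFAULT_BINS)
    if user_bin:
        candidates.insert(0, user_bin)  # user-supplied path wins
    dirs = {os.path.dirname(p) for p in candidates}
    names = {os.path.basename(p) for p in candidates}
    # Try every known binary name in every known directory.
    for name in names:
        for d in dirs:
            path = os.path.join(d, name)
            if os.path.isfile(path) and os.access(path, os.X_OK):
                return path
    return None
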
-# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: kibana_plugin -short_description: Manage Kibana plugins -description: - - This module can be used to manage Kibana plugins. -author: Thierno IB. BARRY (@barryib) -options: - name: - description: - - Name of the plugin to install. - required: True - type: str - state: - description: - - Desired state of a plugin. - choices: ["present", "absent"] - default: present - type: str - url: - description: - - Set exact URL to download the plugin from. - - For local file, prefix its absolute path with file:// - type: str - timeout: - description: - - "Timeout setting: 30s, 1m, 1h etc." - default: 1m - type: str - plugin_bin: - description: - - Location of the Kibana binary. - default: /opt/kibana/bin/kibana - type: path - plugin_dir: - description: - - Your configured plugin directory specified in Kibana. - default: /opt/kibana/installedPlugins/ - type: path - version: - description: - - Version of the plugin to be installed. - - If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes. - type: str - force: - description: - - Delete and re-install the plugin. Can be useful for plugins update. - type: bool - default: false - allow_root: - description: - - Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands. - type: bool - default: false - version_added: 2.3.0 -''' - -EXAMPLES = ''' -- name: Install Elasticsearch head plugin - community.general.kibana_plugin: - state: present - name: elasticsearch/marvel - -- name: Install specific version of a plugin - community.general.kibana_plugin: - state: present - name: elasticsearch/marvel - version: '2.3.3' - -- name: Uninstall Elasticsearch head plugin - community.general.kibana_plugin: - state: absent - name: elasticsearch/marvel -''' - -RETURN = ''' -cmd: - description: the launched command during plugin management (install / remove) - returned: success - type: str -name: - description: the plugin name to install or remove - returned: success - type: str -url: - description: the url from where the plugin is installed from - returned: success - type: str -timeout: - description: the timeout for plugin download - returned: success - type: str -stdout: - description: the command stdout - returned: success - type: str -stderr: - description: the command stderr - returned: success - type: str -state: - description: the state for the managed plugin - returned: success - type: str -''' - -import os -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -PACKAGE_STATE_MAP = dict( - present="--install", - absent="--remove" -) - - -def parse_plugin_repo(string): - elements = string.split("/") - - # We first consider the simplest form: pluginname - repo = elements[0] - - # We consider the form: username/pluginname - if len(elements) > 1: - repo = elements[1] - - # remove elasticsearch- prefix - # remove es- prefix - for string in ("elasticsearch-", "es-"): - if repo.startswith(string): - return repo[len(string):] - - return repo - - -def is_plugin_present(plugin_dir, working_dir): - return os.path.isdir(os.path.join(working_dir, plugin_dir)) - - -def parse_error(string): - reason = "reason: " - try: - return string[string.index(reason) + 
len(reason):].strip() - except ValueError: - return string - - -def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version='4.6'): - if LooseVersion(kibana_version) > LooseVersion('4.6'): - kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') - cmd_args = [kibana_plugin_bin, "install"] - if url: - cmd_args.append(url) - else: - cmd_args.append(plugin_name) - else: - cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name] - - if url: - cmd_args.extend(["--url", url]) - - if timeout: - cmd_args.extend(["--timeout", timeout]) - - if allow_root: - cmd_args.append('--allow-root') - - if module.check_mode: - return True, " ".join(cmd_args), "check mode", "" - - rc, out, err = module.run_command(cmd_args) - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) - - return True, " ".join(cmd_args), out, err - - -def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'): - if LooseVersion(kibana_version) > LooseVersion('4.6'): - kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') - cmd_args = [kibana_plugin_bin, "remove", plugin_name] - else: - cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] - - if allow_root: - cmd_args.append('--allow-root') - - if module.check_mode: - return True, " ".join(cmd_args), "check mode", "" - - rc, out, err = module.run_command(cmd_args) - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) - - return True, " ".join(cmd_args), out, err - - -def get_kibana_version(module, plugin_bin, allow_root): - cmd_args = [plugin_bin, '--version'] - - if allow_root: - cmd_args.append('--allow-root') - - rc, out, err = module.run_command(cmd_args) - if rc != 0: - module.fail_json(msg="Failed to get Kibana version : %s" % err) - - return out.strip() - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - url=dict(default=None), - timeout=dict(default="1m"), - plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), - plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), - version=dict(default=None), - force=dict(default=False, type="bool"), - allow_root=dict(default=False, type="bool"), - ), - supports_check_mode=True, - ) - - name = module.params["name"] - state = module.params["state"] - url = module.params["url"] - timeout = module.params["timeout"] - plugin_bin = module.params["plugin_bin"] - plugin_dir = module.params["plugin_dir"] - version = module.params["version"] - force = module.params["force"] - allow_root = module.params["allow_root"] - - changed, cmd, out, err = False, '', '', '' - - kibana_version = get_kibana_version(module, plugin_bin, allow_root) - - present = is_plugin_present(parse_plugin_repo(name), plugin_dir) - - # skip if the state is correct - if (present and state == "present" and not force) or (state == "absent" and not present and not force): - module.exit_json(changed=False, name=name, state=state) - - if version: - name = name + '/' + version - - if state == "present": - if force: - remove_plugin(module, plugin_bin, name, allow_root, kibana_version) - changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version) - - elif state == "absent": - changed, cmd, out, err = remove_plugin(module, plugin_bin, name, allow_root, kibana_version) - - module.exit_json(changed=changed, cmd=cmd, 
name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/misc/odbc.py b/ansible_collections/community/general/plugins/modules/database/misc/odbc.py deleted file mode 100644 index 5d1cdf88..00000000 --- a/ansible_collections/community/general/plugins/modules/database/misc/odbc.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, John Westcott -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: odbc -author: "John Westcott IV (@john-westcott-iv)" -version_added: "1.0.0" -short_description: Execute SQL via ODBC -description: - - Read/Write info via ODBC drivers. -options: - dsn: - description: - - The connection string passed into ODBC. - required: yes - type: str - query: - description: - - The SQL query to perform. - required: yes - type: str - params: - description: - - Parameters to pass to the SQL query. - type: list - elements: str - commit: - description: - - Perform a commit after the execution of the SQL query. - - Some databases allow a commit after a select whereas others raise an exception. - - Default is C(true) to support legacy module behavior. - type: bool - default: yes - version_added: 1.3.0 -requirements: - - "python >= 2.6" - - "pyodbc" - -notes: - - "Like the command module, this module always returns changed = yes whether or not the query would change the database." - - "To alter this behavior you can use C(changed_when): [yes or no]." - - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)." -''' - -EXAMPLES = ''' -- name: Set some values in the test db - community.general.odbc: - dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;" - query: "Select * from table_a where column1 = ?" - params: - - "value1" - commit: false - changed_when: no -''' - -RETURN = ''' -results: - description: List of lists of strings containing selected rows, likely empty for DDL statements. - returned: success - type: list - elements: list -description: - description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes." - returned: success - type: list - elements: dict -row_count: - description: "The number of rows selected or modified according to the cursor defaults to -1. See notes." 
-  returned: success
-  type: str
-'''
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-HAS_PYODBC = None
-try:
-    import pyodbc
-    HAS_PYODBC = True
-except ImportError as e:
-    HAS_PYODBC = False
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            dsn=dict(type='str', required=True, no_log=True),
-            query=dict(type='str', required=True),
-            params=dict(type='list', elements='str'),
-            commit=dict(type='bool', default=True),
-        ),
-    )
-
-    dsn = module.params.get('dsn')
-    query = module.params.get('query')
-    params = module.params.get('params')
-    commit = module.params.get('commit')
-
-    if not HAS_PYODBC:
-        module.fail_json(msg=missing_required_lib('pyodbc'))
-
-    # Try to make a connection with the DSN
-    connection = None
-    try:
-        connection = pyodbc.connect(dsn)
-    except Exception as e:
-        module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e)))
-
-    result = dict(
-        changed=True,
-        description=[],
-        row_count=-1,
-        results=[],
-    )
-
-    try:
-        cursor = connection.cursor()
-
-        if params:
-            cursor.execute(query, params)
-        else:
-            cursor.execute(query)
-        if commit:
-            cursor.commit()
-        try:
-            # Get the rows out into a 2d array
-            for row in cursor.fetchall():
-                new_row = []
-                for column in row:
-                    new_row.append("{0}".format(column))
-                result['results'].append(new_row)
-
-            # Return additional information from the cursor
-            for row_description in cursor.description:
-                description = {}
-                description['name'] = row_description[0]
-                description['type'] = row_description[1].__name__
-                description['display_size'] = row_description[2]
-                description['internal_size'] = row_description[3]
-                description['precision'] = row_description[4]
-                description['scale'] = row_description[5]
-                description['nullable'] = row_description[6]
-                result['description'].append(description)
-
-            result['row_count'] = cursor.rowcount
-        except pyodbc.ProgrammingError as pe:
-            pass
-        except Exception as e:
-            module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e)))
-
-        cursor.close()
-    except Exception as e:
-        module.fail_json(msg="Failed to execute query: {0}".format(to_native(e)))
-    finally:
-        connection.close()
-
-    module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/database/misc/redis.py b/ansible_collections/community/general/plugins/modules/database/misc/redis.py
deleted file mode 100644
index 13a1f506..00000000
--- a/ansible_collections/community/general/plugins/modules/database/misc/redis.py
+++ /dev/null
@@ -1,328 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: redis
-short_description: Various redis commands, replica and flush
-description:
-  - Unified utility to interact with redis instances.
-extends_documentation_fragment:
-  - community.general.redis
-options:
-  command:
-    description:
-      - The selected redis command.
-      - C(config) ensures a configuration setting on an instance.
-      - C(flush) flushes the whole instance or a specified db.
-      - C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).)
-    choices: [ config, flush, replica, slave ]
-    type: str
-  tls:
-    default: false
-    version_added: 4.6.0
-  login_user:
-    version_added: 4.6.0
-  validate_certs:
-    version_added: 4.6.0
-  ca_certs:
-    version_added: 4.6.0
-  master_host:
-    description:
-      - The host of the master instance [replica command]
-    type: str
-  master_port:
-    description:
-      - The port of the master instance [replica command]
    - type: int
-  replica_mode:
-    description:
-      - The mode of the redis instance [replica command]
-      - C(slave) is an alias for C(replica).
-    default: replica
-    choices: [ master, replica, slave ]
-    type: str
-    aliases:
-      - slave_mode
-  db:
-    description:
-      - The database to flush (used in db mode) [flush command]
-    type: int
-  flush_mode:
-    description:
-      - Type of flush (all the dbs in a redis instance or a specific one)
-        [flush command]
-    default: all
-    choices: [ all, db ]
-    type: str
-  name:
-    description:
-      - A redis config key.
-    type: str
-  value:
-    description:
-      - A redis config value. When memory size is needed, it is possible
-        to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
-        Units are case insensitive i.e. 1m = 1mb = 1M = 1MB.
-    type: str
-
-notes:
-  - Requires the redis-py Python package on the remote host. You can
-    install it with pip (pip install redis) or with a package manager.
-    https://github.com/andymccurdy/redis-py
-  - If the redis master instance that we are making a replica of is password
-    protected, the password needs to be set in redis.conf via the masterauth
-    variable.
-
-seealso:
-  - module: community.general.redis_info
-requirements: [ redis ]
-author: "Xabier Larrakoetxea (@slok)"
-'''
-
-EXAMPLES = '''
-- name: Set local redis instance to be a replica of melee.island on port 6377
-  community.general.redis:
-    command: replica
-    master_host: melee.island
-    master_port: 6377
-
-- name: Deactivate replica mode
-  community.general.redis:
-    command: replica
-    replica_mode: master
-
-- name: Flush all the redis db
-  community.general.redis:
-    command: flush
-    flush_mode: all
-
-- name: Flush only one db in a redis instance
-  community.general.redis:
-    command: flush
-    db: 1
-    flush_mode: db
-
-- name: Configure local redis to have 10000 max clients
-  community.general.redis:
-    command: config
-    name: maxclients
-    value: 10000
-
-- name: Configure local redis maxmemory to 4GB
-  community.general.redis:
-    command: config
-    name: maxmemory
-    value: 4GB
-
-- name: Configure local redis to have lua time limit of 100 ms
-  community.general.redis:
-    command: config
-    name: lua-time-limit
-    value: 100
-'''
-
-import traceback
-
-REDIS_IMP_ERR = None
-try:
-    import redis
-except ImportError:
-    REDIS_IMP_ERR = traceback.format_exc()
-    redis_found = False
-else:
-    redis_found = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.formatters import human_to_bytes
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.redis import (
-    fail_imports, redis_auth_argument_spec, redis_auth_params)
-import re
-
-
-# Redis module specific support methods.
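
[Editor's aside] The config branch of this module (further below) normalizes memory-size literals with ansible-core's human_to_bytes before comparing against CONFIG GET output, which is why values like 4GB and 4294967296 are treated as equal. A quick illustration of that normalization, mirroring the module's value.upper() call:

from ansible.module_utils.common.text.formatters import human_to_bytes

for literal in ('1m', '1mb', '1M', '1MB'):
    # The module upper-cases the value first, so unit case never matters.
    print(literal, '->', human_to_bytes(literal.upper()))
# All four print 1048576, i.e. the base is 1024 as documented above.
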
-def set_replica_mode(client, master_host, master_port): - try: - return client.slaveof(master_host, master_port) - except Exception: - return False - - -def set_master_mode(client): - try: - return client.slaveof() - except Exception: - return False - - -def flush(client, db=None): - try: - if not isinstance(db, int): - return client.flushall() - else: - # The passed client has been connected to the database already - return client.flushdb() - except Exception: - return False - - -# Module execution. -def main(): - redis_auth_args = redis_auth_argument_spec(tls_default=False) - module_args = dict( - command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']), - master_host=dict(type='str'), - master_port=dict(type='int'), - replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], - aliases=["slave_mode"]), - db=dict(type='int'), - flush_mode=dict(type='str', default='all', choices=['all', 'db']), - name=dict(type='str'), - value=dict(type='str'), - ) - module_args.update(redis_auth_args) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - ) - - fail_imports(module, module.params['tls']) - - redis_params = redis_auth_params(module) - - command = module.params['command'] - if command == "slave": - command = "replica" - - # Replica Command section ----------- - if command == "replica": - master_host = module.params['master_host'] - master_port = module.params['master_port'] - mode = module.params['replica_mode'] - if mode == "slave": - mode = "replica" - - # Check if we have all the data - if mode == "replica": # Only need data if we want to be replica - if not master_host: - module.fail_json(msg='In replica mode master host must be provided') - - if not master_port: - module.fail_json(msg='In replica mode master port must be provided') - - # Connect and check - r = redis.StrictRedis(**redis_params) - try: - r.ping() - except Exception as e: - module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) - - # Check if we are already in the mode that we want - info = r.info() - if mode == "master" and info["role"] == "master": - module.exit_json(changed=False, mode=mode) - - elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: - status = dict( - status=mode, - master_host=master_host, - master_port=master_port, - ) - module.exit_json(changed=False, mode=status) - else: - # Do the stuff - # (Check Check_mode before commands so the commands aren't evaluated - # if not necessary) - if mode == "replica": - if module.check_mode or set_replica_mode(r, master_host, master_port): - info = r.info() - status = { - 'status': mode, - 'master_host': master_host, - 'master_port': master_port, - } - module.exit_json(changed=True, mode=status) - else: - module.fail_json(msg='Unable to set replica mode') - - else: - if module.check_mode or set_master_mode(r): - module.exit_json(changed=True, mode=mode) - else: - module.fail_json(msg='Unable to set master mode') - - # flush Command section ----------- - elif command == "flush": - db = module.params['db'] - mode = module.params['flush_mode'] - - # Check if we have all the data - if mode == "db": - if db is None: - module.fail_json(msg="In db mode the db number must be provided") - - # Connect and check - r = redis.StrictRedis(db=db, **redis_params) - try: - r.ping() - except Exception as e: - module.fail_json(msg="unable to connect to database: %s" % 
to_native(e), exception=traceback.format_exc()) - - # Do the stuff - # (Check Check_mode before commands so the commands aren't evaluated - # if not necessary) - if mode == "all": - if module.check_mode or flush(r): - module.exit_json(changed=True, flushed=True) - else: # Flush never fails :) - module.fail_json(msg="Unable to flush all databases") - - else: - if module.check_mode or flush(r, db): - module.exit_json(changed=True, flushed=True, db=db) - else: # Flush never fails :) - module.fail_json(msg="Unable to flush '%d' database" % db) - elif command == 'config': - name = module.params['name'] - - try: # try to parse the value as if it were the memory size - if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()): - value = str(human_to_bytes(module.params['value'].upper())) - else: - value = module.params['value'] - except ValueError: - value = module.params['value'] - - r = redis.StrictRedis(**redis_params) - - try: - r.ping() - except Exception as e: - module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) - - try: - old_value = r.config_get(name)[name] - except Exception as e: - module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc()) - changed = old_value != value - - if module.check_mode or not changed: - module.exit_json(changed=changed, name=name, value=value) - else: - try: - r.config_set(name, value) - except Exception as e: - module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc()) - module.exit_json(changed=changed, name=name, value=value) - else: - module.fail_json(msg='A valid command must be provided') - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/misc/redis_data.py b/ansible_collections/community/general/plugins/modules/database/misc/redis_data.py deleted file mode 100644 index 587b37d0..00000000 --- a/ansible_collections/community/general/plugins/modules/database/misc/redis_data.py +++ /dev/null @@ -1,250 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redis_data -short_description: Set key value pairs in Redis -version_added: 3.7.0 -description: - - Set key value pairs in Redis database. -author: "Andreas Botzner (@paginabianca)" -options: - key: - description: - - Database key. - required: true - type: str - value: - description: - - Value that key should be set to. - required: false - type: str - expiration: - description: - - Expiration time in milliseconds. - Setting this flag will always result in a change in the database. - required: false - type: int - non_existing: - description: - - Only set key if it does not already exist. - required: false - type: bool - existing: - description: - - Only set key if it already exists. - required: false - type: bool - keep_ttl: - description: - - Retain the time to live associated with the key. - required: false - type: bool - state: - description: - - State of the key. 
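
[Editor's aside] The config command above is a read-compare-write cycle: CONFIG GET, compare with the normalized desired value, and CONFIG SET only on drift, which keeps the task idempotent and check_mode-safe. A bare redis-py sketch of the same cycle (the localhost connection and the ensure_config name are illustrative):

import redis

r = redis.StrictRedis(host='localhost', port=6379, decode_responses=True)

def ensure_config(name, value):
    # CONFIG GET returns a {name: value} mapping in redis-py.
    old = r.config_get(name)[name]
    changed = str(old) != str(value)
    if changed:
        r.config_set(name, value)  # only write on drift
    return changed

print(ensure_config('maxclients', '10000'))  # True on first run, False after
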
- default: present - type: str - choices: - - present - - absent - -extends_documentation_fragment: - - community.general.redis.documentation - -seealso: - - module: community.general.redis_data_incr - - module: community.general.redis_data_info - - module: community.general.redis -''' - -EXAMPLES = ''' -- name: Set key foo=bar on localhost with no username - community.general.redis_data: - login_host: localhost - login_password: supersecret - key: foo - value: bar - state: present - -- name: Set key foo=bar if non existing with expiration of 30s - community.general.redis_data: - login_host: localhost - login_password: supersecret - key: foo - value: bar - non_existing: true - expiration: 30000 - state: present - -- name: Set key foo=bar if existing and keep current TTL - community.general.redis_data: - login_host: localhost - login_password: supersecret - key: foo - value: bar - existing: true - keep_ttl: true - -- name: Set key foo=bar on redishost with custom ca-cert file - community.general.redis_data: - login_host: redishost - login_password: supersecret - login_user: someuser - validate_certs: true - ssl_ca_certs: /path/to/ca/certs - key: foo - value: bar - -- name: Delete key foo on localhost with no username - community.general.redis_data: - login_host: localhost - login_password: supersecret - key: foo - state: absent -''' - -RETURN = ''' -old_value: - description: Value of key before setting. - returned: on_success if state is C(present) and key exists in database. - type: str - sample: 'old_value_of_key' -value: - description: Value key was set to. - returned: on success if state is C(present). - type: str - sample: 'new_value_of_key' -msg: - description: A short message. - returned: always - type: str - sample: 'Set key: foo to bar' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) - - -def main(): - redis_auth_args = redis_auth_argument_spec() - module_args = dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='str', required=False), - expiration=dict(type='int', required=False), - non_existing=dict(type='bool', required=False), - existing=dict(type='bool', required=False), - keep_ttl=dict(type='bool', required=False), - state=dict(type='str', default='present', - choices=['present', 'absent']), - ) - module_args.update(redis_auth_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - required_if=[('state', 'present', ('value',))], - mutually_exclusive=[['non_existing', 'existing'], - ['keep_ttl', 'expiration']],) - fail_imports(module) - - redis = RedisAnsible(module) - - key = module.params['key'] - value = module.params['value'] - px = module.params['expiration'] - nx = module.params['non_existing'] - xx = module.params['existing'] - keepttl = module.params['keep_ttl'] - state = module.params['state'] - set_args = {'name': key, 'value': value, 'px': px, - 'nx': nx, 'xx': xx, 'keepttl': keepttl} - - result = {'changed': False} - - old_value = None - try: - old_value = redis.connection.get(key) - except Exception as e: - msg = 'Failed to get value of key: {0} with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - - if state == 'absent': - if module.check_mode: - if old_value is None: - msg = 'Key: {0} not present'.format(key) - result['msg'] = msg - module.exit_json(**result) - else: - msg = 'Deleted key: {0}'.format(key) - 
result['msg'] = msg
-                module.exit_json(**result)
-        try:
-            ret = redis.connection.delete(key)
-            if ret == 0:
-                msg = 'Key: {0} not present'.format(key)
-                result['msg'] = msg
-                module.exit_json(**result)
-            else:
-                msg = 'Deleted key: {0}'.format(key)
-                result['msg'] = msg
-                result['changed'] = True
-                module.exit_json(**result)
-        except Exception as e:
-            msg = 'Failed to delete key: {0} with exception: {1}'.format(
-                key, str(e))
-            result['msg'] = msg
-            module.fail_json(**result)
-
-    old_value = None
-    try:
-        old_value = redis.connection.get(key)
-    except Exception as e:
-        msg = 'Failed to get value of key: {0} with exception: {1}'.format(
-            key, str(e))
-        result['msg'] = msg
-        module.fail_json(**result)
-
-    result['old_value'] = old_value
-    if old_value == value and keepttl is not False and px is None:
-        msg = 'Key {0} already has desired value'.format(key)
-        result['msg'] = msg
-        result['value'] = value
-        module.exit_json(**result)
-    if module.check_mode:
-        result['msg'] = 'Set key: {0}'.format(key)
-        result['value'] = value
-        module.exit_json(**result)
-    try:
-        ret = redis.connection.set(**set_args)
-        if ret is None:
-            if nx:
-                msg = 'Could not set key: {0}. Key already present.'.format(
-                    key)
-            else:
-                msg = 'Could not set key: {0}. Key not present.'.format(key)
-            result['msg'] = msg
-            module.fail_json(**result)
-        msg = 'Set key: {0}'.format(key)
-        result['msg'] = msg
-        result['changed'] = True
-        result['value'] = value
-        module.exit_json(**result)
-    except Exception as e:
-        msg = 'Failed to set key: {0} with exception: {1}'.format(key, str(e))
-        result['msg'] = msg
-        module.fail_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/database/misc/redis_data_incr.py b/ansible_collections/community/general/plugins/modules/database/misc/redis_data_incr.py
deleted file mode 100644
index e9e03941..00000000
--- a/ansible_collections/community/general/plugins/modules/database/misc/redis_data_incr.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Andreas Botzner
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: redis_data_incr
-short_description: Increment keys in Redis
-version_added: 4.0.0
-description:
-  - Increment integers or float keys in Redis database and get new value.
-  - Default increment for all keys is 1. For specific increments use the
-    I(increment_int) and I(increment_float) options.
-  - When using I(check_mode) the module will try to calculate the value that
-    Redis would return. If the key is not present, 0.0 is used as value.
-author: "Andreas Botzner (@paginabianca)"
-options:
-  key:
-    description:
-      - Database key.
-    type: str
-    required: true
-  increment_int:
-    description:
-      - Integer amount to increment the key by.
-    required: false
-    type: int
-  increment_float:
-    description:
-      - Float amount to increment the key by.
-      - This only works with keys that contain float values
-        in their string representation.
-    type: float
-    required: false
-
-
-extends_documentation_fragment:
-  - community.general.redis.documentation
-
-notes:
-  - For C(check_mode) to work, the specified I(redis_user) needs permission to
-    run the C(GET) command on the key, otherwise the module will fail.
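
[Editor's aside] The set() call in redis_data above maps the module options directly onto redis-py's SET keyword arguments: px (expiration in milliseconds), nx (only set if missing), xx (only set if present), keepttl (preserve the existing TTL). redis-py returns None when an nx/xx condition blocks the write, which is exactly what the failure branch above checks. A small sketch, assuming a local Redis and a recent redis-py (keepttl needs Redis 6+ on the server side):

import redis

r = redis.StrictRedis(host='localhost', port=6379, decode_responses=True)

# Succeeds only if 'foo' does not exist yet; expires after 30 000 ms.
print(r.set(name='foo', value='bar', px=30000, nx=True))  # True or None

# Succeeds only if 'foo' already exists, and keeps its current TTL.
print(r.set(name='foo', value='baz', xx=True, keepttl=True))
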
- -seealso: - - module: community.general.redis_data - - module: community.general.redis_data_info - - module: community.general.redis -''' - -EXAMPLES = ''' -- name: Increment integer key foo on localhost with no username and print new value - community.general.redis_data_incr: - login_host: localhost - login_password: supersecret - key: foo - increment_int: 1 - register: result -- name: Print new value - debug: - var: result.value - -- name: Increment float key foo by 20.4 - community.general.redis_data_incr: - login_host: redishost - login_user: redisuser - login_password: somepass - key: foo - increment_float: '20.4' -''' - -RETURN = ''' -value: - description: Incremented value of key - returned: on success - type: float - sample: '4039.4' -msg: - description: A short message. - returned: always - type: str - sample: 'Incremented key: foo by 20.4 to 65.9' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) - - -def main(): - redis_auth_args = redis_auth_argument_spec() - module_args = dict( - key=dict(type='str', required=True, no_log=False), - increment_int=dict(type='int', required=False), - increment_float=dict(type='float', required=False), - ) - module_args.update(redis_auth_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - mutually_exclusive=[['increment_int', 'increment_float']], - ) - fail_imports(module) - - redis = RedisAnsible(module) - key = module.params['key'] - increment_float = module.params['increment_float'] - increment_int = module.params['increment_int'] - increment = 1 - if increment_float is not None: - increment = increment_float - elif increment_int is not None: - increment = increment_int - - result = {'changed': False} - if module.check_mode: - value = 0.0 - try: - res = redis.connection.get(key) - if res is not None: - value = float(res) - except ValueError as e: - msg = 'Value: {0} of key: {1} is not incrementable(int or float)'.format( - res, key) - result['msg'] = msg - module.fail_json(**result) - except Exception as e: - msg = 'Failed to get value of key: {0} with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - msg = 'Incremented key: {0} by {1} to {2}'.format( - key, increment, value + increment) - result['msg'] = msg - result['value'] = float(value + increment) - module.exit_json(**result) - - if increment_float is not None: - try: - value = redis.connection.incrbyfloat(key, increment) - msg = 'Incremented key: {0} by {1} to {2}'.format( - key, increment, value) - result['msg'] = msg - result['value'] = float(value) - result['changed'] = True - module.exit_json(**result) - except Exception as e: - msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format( - key, increment, str(e)) - result['msg'] = msg - module.fail_json(**result) - elif increment_int is not None: - try: - value = redis.connection.incrby(key, increment) - msg = 'Incremented key: {0} by {1} to {2}'.format( - key, increment, value) - result['msg'] = msg - result['value'] = float(value) - result['changed'] = True - module.exit_json(**result) - except Exception as e: - msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format( - key, increment, str(e)) - result['msg'] = msg - module.fail_json(**result) - else: - try: - value = redis.connection.incr(key) - msg = 'Incremented key: {0} to {1}'.format(key, value) - result['msg'] = msg - 
-            result['value'] = float(value)
-            result['changed'] = True
-            module.exit_json(**result)
-        except Exception as e:
-            msg = 'Failed to increment key: {0} with exception: {1}'.format(
-                key, str(e))
-            result['msg'] = msg
-            module.fail_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/database/misc/redis_data_info.py b/ansible_collections/community/general/plugins/modules/database/misc/redis_data_info.py
deleted file mode 100644
index 7ecfd4a2..00000000
--- a/ansible_collections/community/general/plugins/modules/database/misc/redis_data_info.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Andreas Botzner
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: redis_data_info
-short_description: Get value of key in Redis database
-version_added: 3.7.0
-description:
-  - Get the value of a key in a Redis database.
-author: "Andreas Botzner (@paginabianca)"
-options:
-  key:
-    description:
-      - Database key.
-    type: str
-    required: true
-
-extends_documentation_fragment:
-  - community.general.redis
-
-seealso:
-  - module: community.general.redis_data
-  - module: community.general.redis_data_incr
-  - module: community.general.redis_info
-  - module: community.general.redis
-'''
-
-EXAMPLES = '''
-- name: Get key foo=bar from localhost with no username
-  community.general.redis_data_info:
-    login_host: localhost
-    login_password: supersecret
-    key: foo
-
-- name: Get key foo=bar on redishost with custom ca-cert file
-  community.general.redis_data_info:
-    login_host: redishost
-    login_password: supersecret
-    login_user: someuser
-    validate_certs: true
-    ssl_ca_certs: /path/to/ca/certs
-    key: foo
-'''
-
-RETURN = '''
-exists:
-  description: If the key exists in the database.
-  returned: on success
-  type: bool
-value:
-  description: Value key was set to.
-  returned: if existing
-  type: str
-  sample: 'value_of_some_key'
-msg:
-  description: A short message.
- returned: always - type: str - sample: 'Got key: foo with value: bar' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) - - -def main(): - redis_auth_args = redis_auth_argument_spec() - module_args = dict( - key=dict(type='str', required=True, no_log=False), - ) - module_args.update(redis_auth_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - ) - fail_imports(module) - - redis = RedisAnsible(module) - - key = module.params['key'] - result = {'changed': False} - - value = None - try: - value = redis.connection.get(key) - except Exception as e: - msg = 'Failed to get value of key "{0}" with exception: {1}'.format( - key, str(e)) - result['msg'] = msg - module.fail_json(**result) - - if value is None: - msg = 'Key "{0}" does not exist in database'.format(key) - result['exists'] = False - else: - msg = 'Got key "{0}"'.format(key) - result['value'] = value - result['exists'] = True - result['msg'] = msg - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py b/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py deleted file mode 100644 index 9762b03c..00000000 --- a/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: redis_info -short_description: Gather information about Redis servers -version_added: '0.2.0' -description: -- Gathers information and statistics about Redis servers. -options: - login_host: - description: - - The host running the database. - type: str - default: localhost - login_port: - description: - - The port to connect to. - type: int - default: 6379 - login_password: - description: - - The password used to authenticate with, when authentication is enabled for the Redis server. - type: str -notes: -- Requires the redis-py Python package on the remote host. You can - install it with pip (C(pip install redis)) or with a package manager. - U(https://github.com/andymccurdy/redis-py) -seealso: -- module: community.general.redis -requirements: [ redis ] -author: "Pavlo Bashynskyi (@levonet)" -''' - -EXAMPLES = r''' -- name: Get server information - community.general.redis_info: - register: result - -- name: Print server information - ansible.builtin.debug: - var: result.info -''' - -RETURN = r''' -info: - description: The default set of server information sections U(https://redis.io/commands/info). 
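Note: at its core the removed redis_info module is a thin wrapper around the INFO command. A minimal sketch, assuming a local unauthenticated Redis and the redis-py package:

    from redis import StrictRedis

    client = StrictRedis(host='localhost', port=6379)
    client.ping()         # fail fast if the server is unreachable
    info = client.info()  # dict with the default INFO sections, as sampled below
    print(info['redis_version'], info['connected_clients'])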
- returned: success - type: dict - sample: { - "active_defrag_hits": 0, - "active_defrag_key_hits": 0, - "active_defrag_key_misses": 0, - "active_defrag_misses": 0, - "active_defrag_running": 0, - "allocator_active": 932409344, - "allocator_allocated": 932062792, - "allocator_frag_bytes": 346552, - "allocator_frag_ratio": 1.0, - "allocator_resident": 947253248, - "allocator_rss_bytes": 14843904, - "allocator_rss_ratio": 1.02, - "aof_current_rewrite_time_sec": -1, - "aof_enabled": 0, - "aof_last_bgrewrite_status": "ok", - "aof_last_cow_size": 0, - "aof_last_rewrite_time_sec": -1, - "aof_last_write_status": "ok", - "aof_rewrite_in_progress": 0, - "aof_rewrite_scheduled": 0, - "arch_bits": 64, - "atomicvar_api": "atomic-builtin", - "blocked_clients": 0, - "client_recent_max_input_buffer": 4, - "client_recent_max_output_buffer": 0, - "cluster_enabled": 0, - "config_file": "", - "configured_hz": 10, - "connected_clients": 4, - "connected_slaves": 0, - "db0": { - "avg_ttl": 1945628530, - "expires": 16, - "keys": 3341411 - }, - "evicted_keys": 0, - "executable": "/data/redis-server", - "expired_keys": 9, - "expired_stale_perc": 1.72, - "expired_time_cap_reached_count": 0, - "gcc_version": "9.2.0", - "hz": 10, - "instantaneous_input_kbps": 0.0, - "instantaneous_ops_per_sec": 0, - "instantaneous_output_kbps": 0.0, - "keyspace_hits": 0, - "keyspace_misses": 0, - "latest_fork_usec": 0, - "lazyfree_pending_objects": 0, - "loading": 0, - "lru_clock": 11603632, - "master_repl_offset": 118831417, - "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e", - "master_replid2": "0000000000000000000000000000000000000000", - "maxmemory": 0, - "maxmemory_human": "0B", - "maxmemory_policy": "noeviction", - "mem_allocator": "jemalloc-5.1.0", - "mem_aof_buffer": 0, - "mem_clients_normal": 49694, - "mem_clients_slaves": 0, - "mem_fragmentation_bytes": 12355480, - "mem_fragmentation_ratio": 1.01, - "mem_not_counted_for_evict": 0, - "mem_replication_backlog": 1048576, - "migrate_cached_sockets": 0, - "multiplexing_api": "epoll", - "number_of_cached_scripts": 0, - "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64", - "process_id": 1, - "pubsub_channels": 0, - "pubsub_patterns": 0, - "rdb_bgsave_in_progress": 0, - "rdb_changes_since_last_save": 671, - "rdb_current_bgsave_time_sec": -1, - "rdb_last_bgsave_status": "ok", - "rdb_last_bgsave_time_sec": -1, - "rdb_last_cow_size": 0, - "rdb_last_save_time": 1588702236, - "redis_build_id": "a31260535f820267", - "redis_git_dirty": 0, - "redis_git_sha1": 0, - "redis_mode": "standalone", - "redis_version": "999.999.999", - "rejected_connections": 0, - "repl_backlog_active": 1, - "repl_backlog_first_byte_offset": 118707937, - "repl_backlog_histlen": 123481, - "repl_backlog_size": 1048576, - "role": "master", - "rss_overhead_bytes": -3051520, - "rss_overhead_ratio": 1.0, - "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4", - "second_repl_offset": 118830003, - "slave_expires_tracked_keys": 0, - "sync_full": 0, - "sync_partial_err": 0, - "sync_partial_ok": 0, - "tcp_port": 6379, - "total_commands_processed": 885, - "total_connections_received": 10, - "total_net_input_bytes": 802709255, - "total_net_output_bytes": 31754, - "total_system_memory": 135029538816, - "total_system_memory_human": "125.76G", - "uptime_in_days": 53, - "uptime_in_seconds": 4631778, - "used_cpu_sys": 4.668282, - "used_cpu_sys_children": 0.002191, - "used_cpu_user": 4.21088, - "used_cpu_user_children": 0.0, - "used_memory": 931908760, - "used_memory_dataset": 910774306, - "used_memory_dataset_perc": 
"97.82%", - "used_memory_human": "888.74M", - "used_memory_lua": 37888, - "used_memory_lua_human": "37.00K", - "used_memory_overhead": 21134454, - "used_memory_peak": 932015216, - "used_memory_peak_human": "888.84M", - "used_memory_peak_perc": "99.99%", - "used_memory_rss": 944201728, - "used_memory_rss_human": "900.46M", - "used_memory_scripts": 0, - "used_memory_scripts_human": "0B", - "used_memory_startup": 791264 - } -''' - -import traceback - -REDIS_IMP_ERR = None -try: - from redis import StrictRedis - HAS_REDIS_PACKAGE = True -except ImportError: - REDIS_IMP_ERR = traceback.format_exc() - HAS_REDIS_PACKAGE = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def redis_client(**client_params): - return StrictRedis(**client_params) - - -# Module execution. -def main(): - module = AnsibleModule( - argument_spec=dict( - login_host=dict(type='str', default='localhost'), - login_port=dict(type='int', default=6379), - login_password=dict(type='str', no_log=True), - ), - supports_check_mode=True, - ) - - if not HAS_REDIS_PACKAGE: - module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR) - - login_host = module.params['login_host'] - login_port = module.params['login_port'] - login_password = module.params['login_password'] - - # Connect and check - client = redis_client(host=login_host, port=login_port, password=login_password) - try: - client.ping() - except Exception as e: - module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) - - info = client.info() - module.exit_json(changed=False, info=info) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/misc/riak.py b/ansible_collections/community/general/plugins/modules/database/misc/riak.py deleted file mode 100644 index 4ee7b5b6..00000000 --- a/ansible_collections/community/general/plugins/modules/database/misc/riak.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, James Martin , Drew Kerrigan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: riak -short_description: This module handles some common Riak operations -description: - - This module can be used to join nodes to a cluster, check - the status of the cluster. -author: - - "James Martin (@jsmartin)" - - "Drew Kerrigan (@drewkerrigan)" -options: - command: - description: - - The command you would like to perform against the cluster. - choices: ['ping', 'kv_test', 'join', 'plan', 'commit'] - type: str - config_dir: - description: - - The path to the riak configuration directory - default: /etc/riak - type: path - http_conn: - description: - - The ip address and port that is listening for Riak HTTP queries - default: 127.0.0.1:8098 - type: str - target_node: - description: - - The target node for certain operations (join, ping) - default: riak@127.0.0.1 - type: str - wait_for_handoffs: - description: - - Number of seconds to wait for handoffs to complete. - type: int - default: 0 - wait_for_ring: - description: - - Number of seconds to wait for all nodes to agree on the ring. - type: int - default: 0 - wait_for_service: - description: - - Waits for a riak service to come online before continuing. 
- choices: ['kv'] - type: str - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -''' - -EXAMPLES = ''' -- name: "Join's a Riak node to another node" - community.general.riak: - command: join - target_node: riak@10.1.1.1 - -- name: Wait for handoffs to finish. Use with async and poll. - community.general.riak: - wait_for_handoffs: yes - -- name: Wait for riak_kv service to startup - community.general.riak: - wait_for_service: kv -''' - -import json -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def ring_check(module, riak_admin_bin): - cmd = '%s ringready' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0 and 'TRUE All nodes agree on the ring' in out: - return True - else: - return False - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - command=dict(required=False, default=None, choices=[ - 'ping', 'kv_test', 'join', 'plan', 'commit']), - config_dir=dict(default='/etc/riak', type='path'), - http_conn=dict(required=False, default='127.0.0.1:8098'), - target_node=dict(default='riak@127.0.0.1', required=False), - wait_for_handoffs=dict(default=0, type='int'), - wait_for_ring=dict(default=0, type='int'), - wait_for_service=dict( - required=False, default=None, choices=['kv']), - validate_certs=dict(default=True, type='bool')) - ) - - command = module.params.get('command') - http_conn = module.params.get('http_conn') - target_node = module.params.get('target_node') - wait_for_handoffs = module.params.get('wait_for_handoffs') - wait_for_ring = module.params.get('wait_for_ring') - wait_for_service = module.params.get('wait_for_service') - - # make sure riak commands are on the path - riak_bin = module.get_bin_path('riak') - riak_admin_bin = module.get_bin_path('riak-admin') - - timeout = time.time() + 120 - while True: - if time.time() > timeout: - module.fail_json(msg='Timeout, could not fetch Riak stats.') - (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5) - if info['status'] == 200: - stats_raw = response.read() - break - time.sleep(5) - - # here we attempt to load those stats, - try: - stats = json.loads(stats_raw) - except Exception: - module.fail_json(msg='Could not parse Riak stats.') - - node_name = stats['nodename'] - nodes = stats['ring_members'] - ring_size = stats['ring_creation_size'] - rc, out, err = module.run_command([riak_bin, 'version']) - version = out.strip() - - result = dict(node_name=node_name, - nodes=nodes, - ring_size=ring_size, - version=version) - - if command == 'ping': - cmd = '%s ping %s' % (riak_bin, target_node) - rc, out, err = module.run_command(cmd) - if rc == 0: - result['ping'] = out - else: - module.fail_json(msg=out) - - elif command == 'kv_test': - cmd = '%s test' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['kv_test'] = out - else: - module.fail_json(msg=out) - - elif command == 'join': - if nodes.count(node_name) == 1 and len(nodes) > 1: - result['join'] = 'Node is already in cluster or staged to be in cluster.' 
- else: - cmd = '%s cluster join %s' % (riak_admin_bin, target_node) - rc, out, err = module.run_command(cmd) - if rc == 0: - result['join'] = out - result['changed'] = True - else: - module.fail_json(msg=out) - - elif command == 'plan': - cmd = '%s cluster plan' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['plan'] = out - if 'Staged Changes' in out: - result['changed'] = True - else: - module.fail_json(msg=out) - - elif command == 'commit': - cmd = '%s cluster commit' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['commit'] = out - result['changed'] = True - else: - module.fail_json(msg=out) - -# this could take a while, recommend to run in async mode - if wait_for_handoffs: - timeout = time.time() + wait_for_handoffs - while True: - cmd = '%s transfers' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if 'No transfers active' in out: - result['handoffs'] = 'No transfers active.' - break - time.sleep(10) - if time.time() > timeout: - module.fail_json(msg='Timeout waiting for handoffs.') - - if wait_for_service: - cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name] - rc, out, err = module.run_command(cmd) - result['service'] = out - - if wait_for_ring: - timeout = time.time() + wait_for_ring - while True: - if ring_check(module, riak_admin_bin): - break - time.sleep(10) - if time.time() > timeout: - module.fail_json(msg='Timeout waiting for nodes to agree on ring.') - - result['ring_ready'] = ring_check(module, riak_admin_bin) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py b/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py deleted file mode 100644 index e6c5f183..00000000 --- a/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Vedit Firat Arig -# Outline and parts are reused from Mark Theunissen's mysql_db module -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: mssql_db -short_description: Add or remove MSSQL databases from a remote host. -description: - - Add or remove MSSQL databases from a remote host. -options: - name: - description: - - name of the database to add or remove - required: true - aliases: [ db ] - type: str - login_user: - description: - - The username used to authenticate with - type: str - login_password: - description: - - The password used to authenticate with - type: str - login_host: - description: - - Host running the database - type: str - required: true - login_port: - description: - - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used - default: '1433' - type: str - state: - description: - - The database state - default: present - choices: [ "present", "absent", "import" ] - type: str - target: - description: - - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL - files (C(.sql)) files are supported. - type: str - autocommit: - description: - - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed - within a transaction. 
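Note: as the autocommit description above says, some T-SQL statements refuse to run inside a transaction, which is why the removed mssql_db module toggles autocommit around imports. A minimal sketch, assuming pymssql is installed; the connection values are placeholders:

    import pymssql

    # Placeholder credentials for illustration only.
    conn = pymssql.connect(user='sa', password='secret', host='db1', database='master')
    conn.autocommit(True)   # CREATE DATABASE, for example, cannot run in a transaction
    cursor = conn.cursor()
    cursor.execute('CREATE DATABASE [jackdata]')
    conn.autocommit(False)  # restore transactional behaviour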
- type: bool - default: 'no' -notes: - - Requires the pymssql Python package on the remote host. For Ubuntu, this - is as easy as pip install pymssql (See M(ansible.builtin.pip).) -requirements: - - python >= 2.7 - - pymssql -author: Vedit Firat Arig (@vedit) -''' - -EXAMPLES = ''' -- name: Create a new database with name 'jackdata' - community.general.mssql_db: - name: jackdata - state: present - -# Copy database dump file to remote host and restore it to database 'my_db' -- name: Copy database dump file to remote host - ansible.builtin.copy: - src: dump.sql - dest: /tmp - -- name: Restore the dump file to database 'my_db' - community.general.mssql_db: - name: my_db - state: import - target: /tmp/dump.sql -''' - -RETURN = ''' -# -''' - -import os -import traceback - -PYMSSQL_IMP_ERR = None -try: - import pymssql -except ImportError: - PYMSSQL_IMP_ERR = traceback.format_exc() - mssql_found = False -else: - mssql_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def db_exists(conn, cursor, db): - cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db) - conn.commit() - return bool(cursor.rowcount) - - -def db_create(conn, cursor, db): - cursor.execute("CREATE DATABASE [%s]" % db) - return db_exists(conn, cursor, db) - - -def db_delete(conn, cursor, db): - try: - cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db) - except Exception: - pass - cursor.execute("DROP DATABASE [%s]" % db) - return not db_exists(conn, cursor, db) - - -def db_import(conn, cursor, module, db, target): - if os.path.isfile(target): - with open(target, 'r') as backup: - sqlQuery = "USE [%s]\n" % db - for line in backup: - if line is None: - break - elif line.startswith('GO'): - cursor.execute(sqlQuery) - sqlQuery = "USE [%s]\n" % db - else: - sqlQuery += line - cursor.execute(sqlQuery) - conn.commit() - return 0, "import successful", "" - else: - return 1, "cannot find target file", "cannot find target file" - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, aliases=['db']), - login_user=dict(default=''), - login_password=dict(default='', no_log=True), - login_host=dict(required=True), - login_port=dict(default='1433'), - target=dict(default=None), - autocommit=dict(type='bool', default=False), - state=dict( - default='present', choices=['present', 'absent', 'import']) - ) - ) - - if not mssql_found: - module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR) - - db = module.params['name'] - state = module.params['state'] - autocommit = module.params['autocommit'] - target = module.params["target"] - - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - - login_querystring = login_host - if login_port != "1433": - login_querystring = "%s:%s" % (login_host, login_port) - - if login_user != "" and login_password == "": - module.fail_json(msg="when supplying login_user arguments login_password must be provided") - - try: - conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master') - cursor = conn.cursor() - except Exception as e: - if "Unknown database" in str(e): - errno, errstr = e.args - module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) - else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " - 
"@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") - - conn.autocommit(True) - changed = False - - if db_exists(conn, cursor, db): - if state == "absent": - try: - changed = db_delete(conn, cursor, db) - except Exception as e: - module.fail_json(msg="error deleting database: " + str(e)) - elif state == "import": - conn.autocommit(autocommit) - rc, stdout, stderr = db_import(conn, cursor, module, db, target) - - if rc != 0: - module.fail_json(msg="%s" % stderr) - else: - module.exit_json(changed=True, db=db, msg=stdout) - else: - if state == "present": - try: - changed = db_create(conn, cursor, db) - except Exception as e: - module.fail_json(msg="error creating database: " + str(e)) - elif state == "import": - try: - changed = db_create(conn, cursor, db) - except Exception as e: - module.fail_json(msg="error creating database: " + str(e)) - - conn.autocommit(autocommit) - rc, stdout, stderr = db_import(conn, cursor, module, db, target) - - if rc != 0: - module.fail_json(msg="%s" % stderr) - else: - module.exit_json(changed=True, db=db, msg=stdout) - - module.exit_json(changed=changed, db=db) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/mssql/mssql_script.py b/ansible_collections/community/general/plugins/modules/database/mssql/mssql_script.py deleted file mode 100644 index bb80607c..00000000 --- a/ansible_collections/community/general/plugins/modules/database/mssql/mssql_script.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python - -# Copyright: (c) 2021, Kris Budde = 2.7 - - pymssql - -author: - - Kris Budde (@kbudde) -''' - -EXAMPLES = r''' -- name: Check DB connection - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - db: master - script: "SELECT 1" - -- name: Query with parameter - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - script: | - SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s - params: - dbname: msdb - register: result_params -- assert: - that: - - result_params.query_results[0][0][0][0] == 'msdb' - - result_params.query_results[0][0][0][1] == 'ONLINE' - -- name: two batches with default output - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - script: | - SELECT 'Batch 0 - Select 0' - SELECT 'Batch 0 - Select 1' - GO - SELECT 'Batch 1 - Select 0' - register: result_batches -- assert: - that: - - result_batches.query_results | length == 2 # two batch results - - result_batches.query_results[0] | length == 2 # two selects in first batch - - result_batches.query_results[0][0] | length == 1 # one row in first select - - result_batches.query_results[0][0][0] | length == 1 # one column in first row - - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values. 
- -- name: two batches with dict output - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - output: dict - script: | - SELECT 'Batch 0 - Select 0' as b0s0 - SELECT 'Batch 0 - Select 1' as b0s1 - GO - SELECT 'Batch 1 - Select 0' as b1s0 - register: result_batches_dict -- assert: - that: - - result_batches_dict.query_results_dict | length == 2 # two batch results - - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch - - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select - - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row -''' - -RETURN = r''' -query_results: - description: List of batches (queries separated by C(GO) keyword). - type: list - elements: list - returned: success and I(output=default) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] - contains: - queries: - description: - - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. - - Use the C(GO) keyword in I(script) to separate queries. - type: list - elements: list - contains: - rows: - description: List of rows returned by query. - type: list - elements: list - contains: - column_value: - description: - - List of column values. - - Any non-standard JSON type is converted to string. - type: list - example: ["Batch 0 - Select 0"] - returned: success, if output is default -query_results_dict: - description: List of batches (queries separated by C(GO) keyword). - type: list - elements: list - returned: success and I(output=dict) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] - contains: - queries: - description: - - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. - Use 'GO' keyword to separate queries. - type: list - elements: list - contains: - rows: - description: List of rows returned by query. - type: list - elements: list - contains: - column_dict: - description: - - Dictionary of column names and values. - - Any non-standard JSON type is converted to string. 
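Note: the conversion to string mentioned here is implemented by round-tripping the results through json with a str fallback (the module's clean_output helper), so non-JSON types such as datetime or Decimal are stringified instead of breaking exit_json(). A minimal sketch:

    import datetime
    import json

    rows = [[datetime.datetime(2021, 1, 1), 42]]
    # default=str stringifies anything json cannot encode natively.
    safe = json.loads(json.dumps(rows, default=str))
    print(safe)  # [['2021-01-01 00:00:00', 42]]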
- type: dict - example: {"col_name": "Batch 0 - Select 0"} - returned: success, if output is dict -''' - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import traceback -import json -PYMSSQL_IMP_ERR = None -try: - import pymssql -except ImportError: - PYMSSQL_IMP_ERR = traceback.format_exc() - MSSQL_FOUND = False -else: - MSSQL_FOUND = True - - -def clean_output(o): - return str(o) - - -def run_module(): - module_args = dict( - name=dict(required=False, aliases=['db'], default=''), - login_user=dict(), - login_password=dict(no_log=True), - login_host=dict(required=True), - login_port=dict(type='int', default=1433), - script=dict(required=True), - output=dict(default='default', choices=['dict', 'default']), - params=dict(type='dict'), - ) - - result = dict( - changed=False, - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - if not MSSQL_FOUND: - module.fail_json(msg=missing_required_lib( - 'pymssql'), exception=PYMSSQL_IMP_ERR) - - db = module.params['name'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - script = module.params['script'] - output = module.params['output'] - sql_params = module.params['params'] - - login_querystring = login_host - if login_port != 1433: - login_querystring = "%s:%s" % (login_host, login_port) - - if login_user is not None and login_password is None: - module.fail_json( - msg="when supplying login_user argument, login_password must also be provided") - - try: - conn = pymssql.connect( - user=login_user, password=login_password, host=login_querystring, database=db) - cursor = conn.cursor() - except Exception as e: - if "Unknown database" in str(e): - errno, errstr = e.args - module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) - else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " - "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") - - conn.autocommit(True) - - query_results_key = 'query_results' - if output == 'dict': - cursor = conn.cursor(as_dict=True) - query_results_key = 'query_results_dict' - - queries = script.split('\nGO\n') - result['changed'] = True - if module.check_mode: - module.exit_json(**result) - - query_results = [] - try: - for query in queries: - cursor.execute(query, sql_params) - qry_result = [] - rows = cursor.fetchall() - while rows: - qry_result.append(rows) - rows = cursor.fetchall() - query_results.append(qry_result) - except Exception as e: - return module.fail_json(msg="query failed", query=query, error=str(e), **result) - - # ensure that the result is json serializable - qry_results = json.loads(json.dumps(query_results, default=clean_output)) - - result[query_results_key] = qry_results - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/saphana/hana_query.py b/ansible_collections/community/general/plugins/modules/database/saphana/hana_query.py deleted file mode 100644 index ac026d5a..00000000 --- a/ansible_collections/community/general/plugins/modules/database/saphana/hana_query.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Rainer Leber -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import 
(absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: hana_query -short_description: Execute SQL on HANA -version_added: 3.2.0 -description: This module executes SQL statements on HANA with hdbsql. -options: - sid: - description: The system ID. - type: str - required: true - instance: - description: The instance number. - type: str - required: true - user: - description: A dedicated username. The user could be also in hdbuserstore. Defaults to C(SYSTEM). - type: str - default: SYSTEM - userstore: - description: If C(true) the user must be in hdbuserstore. - type: bool - default: false - version_added: 3.5.0 - password: - description: - - The password to connect to the database. - - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should - be used whenever possible, as command line arguments can be seen by other users - on the same machine." - type: str - autocommit: - description: Autocommit the statement. - type: bool - default: true - host: - description: The Host IP address. The port can be defined as well. - type: str - database: - description: Define the database on which to connect. - type: str - encrypted: - description: Use encrypted connection. Defaults to C(false). - type: bool - default: false - filepath: - description: - - One or more files each containing one SQL query to run. - - Must be a string or list containing strings. - type: list - elements: path - query: - description: - - SQL query to run. - - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list. - It is better to supply a one-element list instead to avoid mangled input. - type: list - elements: str -notes: - - Does not support C(check_mode). -author: - - Rainer Leber (@rainerleber) -''' - -EXAMPLES = r''' -- name: Simple select query - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - query: "select user_name from users" - -- name: Run several queries - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - query: - - "select user_name from users;" - - select * from SYSTEM; - host: "localhost" - autocommit: False - -- name: Run several queries from file - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - filepath: - - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt - - /tmp/HANA.txt - host: "localhost" - -- name: Run several queries from user store - community.general.hana_query: - sid: "hdb" - instance: "01" - user: hdbstoreuser - userstore: true - query: - - "select user_name from users;" - - select * from users; - autocommit: False -''' - -RETURN = r''' -query_result: - description: List containing results of all queries executed (one sublist for every query). 
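Note: the removed hana_query module shells out to the hdbsql binary rather than using a database driver, then parses its CSV output. A rough sketch of the equivalent call, assuming instance 01 and SYSTEM/password authentication (with userstore=true the module uses -U instead of -u/-p):

    import subprocess

    sid, instance, user, password = 'HDB', '01', 'SYSTEM', 'Test123'
    hdbsql = '/usr/sap/{0}/HDB{1}/exe/hdbsql'.format(sid, instance)
    # -x suppresses extra output such as the number of selected rows.
    cmd = [hdbsql, '-x', '-i', instance, '-u', user, '-p', password,
           'select user_name from users']
    result = subprocess.run(cmd, capture_output=True, text=True)
    print(result.stdout)  # CSV, parsed by the module with csv.DictReader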
- returned: on success - type: list - elements: list - sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]] -''' - -import csv -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import StringIO -from ansible.module_utils.common.text.converters import to_native - - -def csv_to_list(rawcsv): - reader_raw = csv.DictReader(StringIO(rawcsv)) - reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw] - return list(reader) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - sid=dict(type='str', required=True), - instance=dict(type='str', required=True), - encrypted=dict(type='bool', default=False), - host=dict(type='str', required=False), - user=dict(type='str', default="SYSTEM"), - userstore=dict(type='bool', default=False), - password=dict(type='str', no_log=True), - database=dict(type='str', required=False), - query=dict(type='list', elements='str', required=False), - filepath=dict(type='list', elements='path', required=False), - autocommit=dict(type='bool', default=True), - ), - required_one_of=[('query', 'filepath')], - required_if=[('userstore', False, ['password'])], - supports_check_mode=False, - ) - rc, out, err, out_raw = [0, [], "", ""] - - params = module.params - - sid = (params['sid']).upper() - instance = params['instance'] - user = params['user'] - userstore = params['userstore'] - password = params['password'] - autocommit = params['autocommit'] - host = params['host'] - database = params['database'] - encrypted = params['encrypted'] - - filepath = params['filepath'] - query = params['query'] - - bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance) - - try: - command = [module.get_bin_path(bin_path, required=True)] - except Exception as e: - module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". Please check SID and instance number: "{1}"'.format(bin_path, to_native(e))) - - if encrypted is True: - command.extend(['-attemptencrypt']) - if autocommit is False: - command.extend(['-z']) - if host is not None: - command.extend(['-n', host]) - if database is not None: - command.extend(['-d', database]) - # -x Suppresses additional output, such as the number of selected rows in a result set. - if userstore: - command.extend(['-x', '-U', user]) - else: - command.extend(['-x', '-i', instance, '-u', user, '-p', password]) - - if filepath is not None: - command.extend(['-I']) - for p in filepath: - # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt, - # iterates through files and append the output to var out. - query_command = command + [p] - (rc, out_raw, err) = module.run_command(query_command) - out.append(csv_to_list(out_raw)) - if query is not None: - for q in query: - # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users", - # iterates through multiple commands and append the output to var out. 
- query_command = command + [q] - (rc, out_raw, err) = module.run_command(query_command) - out.append(csv_to_list(out_raw)) - changed = True - - module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py b/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py deleted file mode 100644 index b210e3f6..00000000 --- a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_configuration -short_description: Updates Vertica configuration parameters. -description: - - Updates Vertica configuration parameters. -options: - parameter: - description: - - Name of the parameter to update. - required: true - aliases: [name] - type: str - value: - description: - - Value of the parameter to be set. - type: str - db: - description: - - Name of the Vertica database. - type: str - cluster: - description: - - Name of the Vertica cluster. - default: localhost - type: str - port: - description: - - Vertica cluster port to connect to. - default: '5433' - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Updating load_balance_policy - community.general.vertica_configuration: name=failovertostandbyafter value='8 hours' -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - - -class CannotDropError(Exception): - pass - -# module specific functions - - -def get_configuration_facts(cursor, parameter_name=''): - facts = {} - cursor.execute(""" - select c.parameter_name, c.current_value, c.default_value - from configuration_parameters c - where c.node_name = 'ALL' - and (? = '' or c.parameter_name ilike ?) 
- """, parameter_name, parameter_name) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.parameter_name.lower()] = { - 'parameter_name': row.parameter_name, - 'current_value': row.current_value, - 'default_value': row.default_value} - return facts - - -def check(configuration_facts, parameter_name, current_value): - parameter_key = parameter_name.lower() - if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): - return False - return True - - -def present(configuration_facts, cursor, parameter_name, current_value): - parameter_key = parameter_name.lower() - changed = False - if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): - cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value)) - changed = True - if changed: - configuration_facts.update(get_configuration_facts(cursor, parameter_name)) - return changed - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - parameter=dict(required=True, aliases=['name']), - value=dict(default=None), - db=dict(default=None), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - parameter_name = module.params['parameter'] - current_value = module.params['value'] - db = '' - if module.params['db']: - db = module.params['db'] - - changed = False - - try: - dsn = ( - "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)), - exception=traceback.format_exc()) - - try: - configuration_facts = get_configuration_facts(cursor) - if module.check_mode: - changed = not check(configuration_facts, parameter_name, current_value) - else: - try: - changed = present(configuration_facts, cursor, parameter_name, current_value) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts}) - except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts}) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py b/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py deleted file mode 100644 index 3822a071..00000000 --- a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible 
Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_info -short_description: Gathers Vertica database facts. -description: - - Gathers Vertica database information. - - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)! -options: - cluster: - description: - - Name of the cluster running the schema. - default: localhost - type: str - port: - description: - Database port to connect to. - default: '5433' - type: str - db: - description: - - Name of the database running the schema. - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Gathering vertica facts - community.general.vertica_info: db=db_name - register: result - -- name: Print schemas - ansible.builtin.debug: - msg: "{{ result.vertica_schemas }}" -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - -# module specific functions - - -def get_schema_facts(cursor, schema=''): - facts = {} - cursor.execute(""" - select schema_name, schema_owner, create_time - from schemata - where not is_system_schema and schema_name not in ('public') - and (? = '' or schema_name ilike ?) - """, schema, schema) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.schema_name.lower()] = { - 'name': row.schema_name, - 'owner': row.schema_owner, - 'create_time': str(row.create_time), - 'usage_roles': [], - 'create_roles': []} - cursor.execute(""" - select g.object_name as schema_name, r.name as role_name, - lower(g.privileges_description) privileges_description - from roles r join grants g - on g.grantee = r.name and g.object_type='SCHEMA' - and g.privileges_description like '%USAGE%' - and g.grantee not in ('public', 'dbadmin') - and (? = '' or g.object_name ilike ?) 
- """, schema, schema) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - schema_key = row.schema_name.lower() - if 'create' in row.privileges_description: - facts[schema_key]['create_roles'].append(row.role_name) - else: - facts[schema_key]['usage_roles'].append(row.role_name) - return facts - - -def get_user_facts(cursor, user=''): - facts = {} - cursor.execute(""" - select u.user_name, u.is_locked, u.lock_time, - p.password, p.acctexpired as is_expired, - u.profile_name, u.resource_pool, - u.all_roles, u.default_roles - from users u join password_auditor p on p.user_id = u.user_id - where not u.is_super_user - and (? = '' or u.user_name ilike ?) - """, user, user) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - user_key = row.user_name.lower() - facts[user_key] = { - 'name': row.user_name, - 'locked': str(row.is_locked), - 'password': row.password, - 'expired': str(row.is_expired), - 'profile': row.profile_name, - 'resource_pool': row.resource_pool, - 'roles': [], - 'default_roles': []} - if row.is_locked: - facts[user_key]['locked_time'] = str(row.lock_time) - if row.all_roles: - facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') - if row.default_roles: - facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') - return facts - - -def get_role_facts(cursor, role=''): - facts = {} - cursor.execute(""" - select r.name, r.assigned_roles - from roles r - where (? = '' or r.name ilike ?) - """, role, role) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - role_key = row.name.lower() - facts[role_key] = { - 'name': row.name, - 'assigned_roles': []} - if row.assigned_roles: - facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') - return facts - - -def get_configuration_facts(cursor, parameter=''): - facts = {} - cursor.execute(""" - select c.parameter_name, c.current_value, c.default_value - from configuration_parameters c - where c.node_name = 'ALL' - and (? = '' or c.parameter_name ilike ?) 
- """, parameter, parameter) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.parameter_name.lower()] = { - 'parameter_name': row.parameter_name, - 'current_value': row.current_value, - 'default_value': row.default_value} - return facts - - -def get_node_facts(cursor, schema=''): - facts = {} - cursor.execute(""" - select node_name, node_address, export_address, node_state, node_type, - catalog_path - from nodes - """) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.node_address] = { - 'node_name': row.node_name, - 'export_address': row.export_address, - 'node_state': row.node_state, - 'node_type': row.node_type, - 'catalog_path': row.catalog_path} - return facts - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - cluster=dict(default='localhost'), - port=dict(default='5433'), - db=dict(default=None), - login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - db = '' - if module.params['db']: - db = module.params['db'] - - try: - dsn = ( - "Driver=Vertica;" - "Server=%s;" - "Port=%s;" - "Database=%s;" - "User=%s;" - "Password=%s;" - "ConnectionLoadBalance=%s" - ) % (module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc()) - - try: - schema_facts = get_schema_facts(cursor) - user_facts = get_user_facts(cursor) - role_facts = get_role_facts(cursor) - configuration_facts = get_configuration_facts(cursor) - node_facts = get_node_facts(cursor) - - module.exit_json(changed=False, - vertica_schemas=schema_facts, - vertica_users=user_facts, - vertica_roles=role_facts, - vertica_configuration=configuration_facts, - vertica_nodes=node_facts) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py b/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py deleted file mode 100644 index 4ec75d7d..00000000 --- a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py +++ /dev/null @@ -1,246 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_role -short_description: Adds or removes Vertica database roles and assigns roles to them. -description: - - Adds or removes Vertica database role and, optionally, assign other roles. -options: - role: - description: - - Name of the role to add or remove. - required: true - type: str - aliases: ['name'] - assigned_roles: - description: - - Comma separated list of roles to assign to the role. 
- aliases: ['assigned_role'] - type: str - state: - description: - - Whether to create C(present), drop C(absent) or lock C(locked) a role. - choices: ['present', 'absent'] - default: present - type: str - db: - description: - - Name of the Vertica database. - type: str - cluster: - description: - - Name of the Vertica cluster. - default: localhost - type: str - port: - description: - - Vertica cluster port to connect to. - default: '5433' - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Creating a new vertica role - community.general.vertica_role: name=role_name db=db_name state=present - -- name: Creating a new vertica role with other role assigned - community.general.vertica_role: name=role_name assigned_role=other_role_name state=present -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - - -class CannotDropError(Exception): - pass - -# module specific functions - - -def get_role_facts(cursor, role=''): - facts = {} - cursor.execute(""" - select r.name, r.assigned_roles - from roles r - where (? = '' or r.name ilike ?) 
- """, role, role) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - role_key = row.name.lower() - facts[role_key] = { - 'name': row.name, - 'assigned_roles': []} - if row.assigned_roles: - facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') - return facts - - -def update_roles(role_facts, cursor, role, - existing, required): - for assigned_role in set(existing) - set(required): - cursor.execute("revoke {0} from {1}".format(assigned_role, role)) - for assigned_role in set(required) - set(existing): - cursor.execute("grant {0} to {1}".format(assigned_role, role)) - - -def check(role_facts, role, assigned_roles): - role_key = role.lower() - if role_key not in role_facts: - return False - if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']): - return False - return True - - -def present(role_facts, cursor, role, assigned_roles): - role_key = role.lower() - if role_key not in role_facts: - cursor.execute("create role {0}".format(role)) - update_roles(role_facts, cursor, role, [], assigned_roles) - role_facts.update(get_role_facts(cursor, role)) - return True - else: - changed = False - if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])): - update_roles(role_facts, cursor, role, - role_facts[role_key]['assigned_roles'], assigned_roles) - changed = True - if changed: - role_facts.update(get_role_facts(cursor, role)) - return changed - - -def absent(role_facts, cursor, role, assigned_roles): - role_key = role.lower() - if role_key in role_facts: - update_roles(role_facts, cursor, role, - role_facts[role_key]['assigned_roles'], []) - cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) - del role_facts[role_key] - return True - else: - return False - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - role=dict(required=True, aliases=['name']), - assigned_roles=dict(default=None, aliases=['assigned_role']), - state=dict(default='present', choices=['absent', 'present']), - db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), - login_password=dict(no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - role = module.params['role'] - assigned_roles = [] - if module.params['assigned_roles']: - assigned_roles = module.params['assigned_roles'].split(',') - assigned_roles = filter(None, assigned_roles) - state = module.params['state'] - db = '' - if module.params['db']: - db = module.params['db'] - - changed = False - - try: - dsn = ( - "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e))) - - try: - role_facts = get_role_facts(cursor) - if module.check_mode: - changed = not check(role_facts, role, assigned_roles) - elif state == 'absent': - try: - changed = absent(role_facts, cursor, role, assigned_roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state == 'present': - try: - changed = 
present(role_facts, cursor, role, assigned_roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts}) - except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts}) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts}) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py b/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py deleted file mode 100644 index 12af3e64..00000000 --- a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py +++ /dev/null @@ -1,319 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_schema -short_description: Adds or removes Vertica database schema and roles. -description: - - Adds or removes Vertica database schema and, optionally, roles - with schema access privileges. - - A schema will not be removed until all the objects have been dropped. - - In such a situation, if the module tries to remove the schema it - will fail and only remove roles created for the schema if they have - no dependencies. -options: - schema: - description: - - Name of the schema to add or remove. - required: true - aliases: ['name'] - type: str - usage_roles: - description: - - Comma separated list of roles to create and grant usage access to the schema. - aliases: ['usage_role'] - type: str - create_roles: - description: - - Comma separated list of roles to create and grant usage and create access to the schema. - aliases: ['create_role'] - type: str - owner: - description: - - Name of the user to set as owner of the schema. - type: str - state: - description: - - Whether to create C(present), or drop C(absent) a schema. - default: present - choices: ['present', 'absent'] - type: str - db: - description: - - Name of the Vertica database. - type: str - cluster: - description: - - Name of the Vertica cluster. - default: localhost - type: str - port: - description: - - Vertica cluster port to connect to. - default: '5433' - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
- - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Creating a new vertica schema - community.general.vertica_schema: name=schema_name db=db_name state=present - -- name: Creating a new schema with specific schema owner - community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present - -- name: Creating a new schema with roles - community.general.vertica_schema: - name=schema_name - create_roles=schema_name_all - usage_roles=schema_name_ro,schema_name_rw - db=db_name - state=present -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - - -class CannotDropError(Exception): - pass - -# module specific functions - - -def get_schema_facts(cursor, schema=''): - facts = {} - cursor.execute(""" - select schema_name, schema_owner, create_time - from schemata - where not is_system_schema and schema_name not in ('public', 'TxtIndex') - and (? = '' or schema_name ilike ?) - """, schema, schema) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - facts[row.schema_name.lower()] = { - 'name': row.schema_name, - 'owner': row.schema_owner, - 'create_time': str(row.create_time), - 'usage_roles': [], - 'create_roles': []} - cursor.execute(""" - select g.object_name as schema_name, r.name as role_name, - lower(g.privileges_description) privileges_description - from roles r join grants g - on g.grantee_id = r.role_id and g.object_type='SCHEMA' - and g.privileges_description like '%USAGE%' - and g.grantee not in ('public', 'dbadmin') - and (? = '' or g.object_name ilike ?) 
- """, schema, schema) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - schema_key = row.schema_name.lower() - if 'create' in row.privileges_description: - facts[schema_key]['create_roles'].append(row.role_name) - else: - facts[schema_key]['usage_roles'].append(row.role_name) - return facts - - -def update_roles(schema_facts, cursor, schema, - existing, required, - create_existing, create_required): - for role in set(existing + create_existing) - set(required + create_required): - cursor.execute("drop role {0} cascade".format(role)) - for role in set(create_existing) - set(create_required): - cursor.execute("revoke create on schema {0} from {1}".format(schema, role)) - for role in set(required + create_required) - set(existing + create_existing): - cursor.execute("create role {0}".format(role)) - cursor.execute("grant usage on schema {0} to {1}".format(schema, role)) - for role in set(create_required) - set(create_existing): - cursor.execute("grant create on schema {0} to {1}".format(schema, role)) - - -def check(schema_facts, schema, usage_roles, create_roles, owner): - schema_key = schema.lower() - if schema_key not in schema_facts: - return False - if owner and owner.lower() == schema_facts[schema_key]['owner'].lower(): - return False - if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']): - return False - if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']): - return False - return True - - -def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): - schema_key = schema.lower() - if schema_key not in schema_facts: - query_fragments = ["create schema {0}".format(schema)] - if owner: - query_fragments.append("authorization {0}".format(owner)) - cursor.execute(' '.join(query_fragments)) - update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles) - schema_facts.update(get_schema_facts(cursor, schema)) - return True - else: - changed = False - if owner and owner.lower() != schema_facts[schema_key]['owner'].lower(): - raise NotSupportedError(( - "Changing schema owner is not supported. " - "Current owner: {0}." 
- ).format(schema_facts[schema_key]['owner'])) - if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \ - sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']): - - update_roles(schema_facts, cursor, schema, - schema_facts[schema_key]['usage_roles'], usage_roles, - schema_facts[schema_key]['create_roles'], create_roles) - changed = True - if changed: - schema_facts.update(get_schema_facts(cursor, schema)) - return changed - - -def absent(schema_facts, cursor, schema, usage_roles, create_roles): - schema_key = schema.lower() - if schema_key in schema_facts: - update_roles(schema_facts, cursor, schema, - schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], []) - try: - cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name'])) - except pyodbc.Error: - raise CannotDropError("Dropping schema failed due to dependencies.") - del schema_facts[schema_key] - return True - else: - return False - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - schema=dict(required=True, aliases=['name']), - usage_roles=dict(aliases=['usage_role']), - create_roles=dict(aliases=['create_role']), - owner=dict(), - state=dict(default='present', choices=['absent', 'present']), - db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), - login_password=dict(no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - schema = module.params['schema'] - usage_roles = [] - if module.params['usage_roles']: - usage_roles = module.params['usage_roles'].split(',') - usage_roles = filter(None, usage_roles) - create_roles = [] - if module.params['create_roles']: - create_roles = module.params['create_roles'].split(',') - create_roles = filter(None, create_roles) - owner = module.params['owner'] - state = module.params['state'] - db = '' - if module.params['db']: - db = module.params['db'] - - changed = False - - try: - dsn = ( - "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e))) - - try: - schema_facts = get_schema_facts(cursor) - if module.check_mode: - changed = not check(schema_facts, schema, usage_roles, create_roles, owner) - elif state == 'absent': - try: - changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state == 'present': - try: - changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts}) - except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts}) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - 
module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts}) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py b/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py deleted file mode 100644 index 0616c302..00000000 --- a/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py +++ /dev/null @@ -1,385 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: vertica_user -short_description: Adds or removes Vertica database users and assigns roles. -description: - - Adds or removes Vertica database user and, optionally, assigns roles. - - A user will not be removed until all the dependencies have been dropped. - - In such a situation, if the module tries to remove the user it - will fail and only remove roles granted to the user. -options: - user: - description: - - Name of the user to add or remove. - required: true - type: str - aliases: ['name'] - profile: - description: - - Sets the user's profile. - type: str - resource_pool: - description: - - Sets the user's resource pool. - type: str - password: - description: - - The user's password encrypted by the MD5 algorithm. - - The password must be generated with the format C("md5" + md5[password + username]), - resulting in a total of 35 characters. An easy way to do this is by querying - the Vertica database with select 'md5'||md5(''). - type: str - expired: - description: - - Sets the user's password expiration. - type: bool - ldap: - description: - - Set to true if users are authenticated via LDAP. - - The user will be created with password expired and set to I($ldap$). - type: bool - roles: - description: - - Comma separated list of roles to assign to the user. - aliases: ['role'] - type: str - state: - description: - - Whether to create C(present), drop C(absent) or lock C(locked) a user. - choices: ['present', 'absent', 'locked'] - default: present - type: str - db: - description: - - Name of the Vertica database. - type: str - cluster: - description: - - Name of the Vertica cluster. - default: localhost - type: str - port: - description: - - Vertica cluster port to connect to. - default: '5433' - type: str - login_user: - description: - - The username used to authenticate with. - default: dbadmin - type: str - login_password: - description: - - The password used to authenticate with. - type: str -notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
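All three vertica_* modules removed in this diff hand-build the same pyodbc DSN and page through query results with fetchmany(). As a minimal standalone sketch of that shared pattern, assuming the ODBC driver is registered under the name Vertica as the notes above describe (server, database, and credentials below are placeholders):

```python
# Sketch of the connection and pagination pattern shared by the
# vertica_* modules in this diff. Assumes the driver is registered
# as "Vertica" in odbcinst.ini; all connection values are placeholders.
import pyodbc

dsn = (
    "Driver=Vertica;"
    "Server=localhost;"
    "Port=5433;"
    "Database=db_name;"
    "User=dbadmin;"
    "Password=secret;"
    "ConnectionLoadBalance=true"
)
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()

# Fact gathering pages through results in batches of 100, exactly as
# the get_*_facts() helpers do.
cursor.execute("select node_name, node_state from nodes")
while True:
    rows = cursor.fetchmany(100)
    if not rows:
        break
    for row in rows:
        print(row.node_name, row.node_state)
```

Connecting with autocommit=True mirrors the modules: each DDL statement they issue (create role, grant, drop schema, and so on) takes effect immediately rather than inside an implicit transaction.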
-requirements: [ 'unixODBC', 'pyodbc' ] -author: "Dariusz Owczarek (@dareko)" -''' - -EXAMPLES = """ -- name: Creating a new vertica user with password - community.general.vertica_user: name=user_name password=md5 db=db_name state=present - -- name: Creating a new vertica user authenticated via ldap with roles assigned - community.general.vertica_user: - name=user_name - ldap=true - db=db_name - roles=schema_name_ro - state=present -""" -import traceback - -PYODBC_IMP_ERR = None -try: - import pyodbc -except ImportError: - PYODBC_IMP_ERR = traceback.format_exc() - pyodbc_found = False -else: - pyodbc_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class NotSupportedError(Exception): - pass - - -class CannotDropError(Exception): - pass - -# module specific functions - - -def get_user_facts(cursor, user=''): - facts = {} - cursor.execute(""" - select u.user_name, u.is_locked, u.lock_time, - p.password, p.acctexpired as is_expired, - u.profile_name, u.resource_pool, - u.all_roles, u.default_roles - from users u join password_auditor p on p.user_id = u.user_id - where not u.is_super_user - and (? = '' or u.user_name ilike ?) - """, user, user) - while True: - rows = cursor.fetchmany(100) - if not rows: - break - for row in rows: - user_key = row.user_name.lower() - facts[user_key] = { - 'name': row.user_name, - 'locked': str(row.is_locked), - 'password': row.password, - 'expired': str(row.is_expired), - 'profile': row.profile_name, - 'resource_pool': row.resource_pool, - 'roles': [], - 'default_roles': []} - if row.is_locked: - facts[user_key]['locked_time'] = str(row.lock_time) - if row.all_roles: - facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') - if row.default_roles: - facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') - return facts - - -def update_roles(user_facts, cursor, user, - existing_all, existing_default, required): - del_roles = list(set(existing_all) - set(required)) - if del_roles: - cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user)) - new_roles = list(set(required) - set(existing_all)) - if new_roles: - cursor.execute("grant {0} to {1}".format(','.join(new_roles), user)) - if required: - cursor.execute("alter user {0} default role {1}".format(user, ','.join(required))) - - -def check(user_facts, user, profile, resource_pool, - locked, password, expired, ldap, roles): - user_key = user.lower() - if user_key not in user_facts: - return False - if profile and profile != user_facts[user_key]['profile']: - return False - if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: - return False - if locked != (user_facts[user_key]['locked'] == 'True'): - return False - if password and password != user_facts[user_key]['password']: - return False - if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or - ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')): - return False - if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or - sorted(roles) != sorted(user_facts[user_key]['default_roles'])): - return False - return True - - -def present(user_facts, cursor, user, profile, resource_pool, - locked, password, expired, ldap, roles): - user_key = user.lower() - if user_key not in user_facts: - query_fragments = ["create user {0}".format(user)] - if locked: - query_fragments.append("account lock") - if password or ldap: 
- if password: - query_fragments.append("identified by '{0}'".format(password)) - else: - query_fragments.append("identified by '$ldap$'") - if expired or ldap: - query_fragments.append("password expire") - if profile: - query_fragments.append("profile {0}".format(profile)) - if resource_pool: - query_fragments.append("resource pool {0}".format(resource_pool)) - cursor.execute(' '.join(query_fragments)) - if resource_pool and resource_pool != 'general': - cursor.execute("grant usage on resource pool {0} to {1}".format( - resource_pool, user)) - update_roles(user_facts, cursor, user, [], [], roles) - user_facts.update(get_user_facts(cursor, user)) - return True - else: - changed = False - query_fragments = ["alter user {0}".format(user)] - if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): - if locked: - state = 'lock' - else: - state = 'unlock' - query_fragments.append("account {0}".format(state)) - changed = True - if password and password != user_facts[user_key]['password']: - query_fragments.append("identified by '{0}'".format(password)) - changed = True - if ldap: - if ldap != (user_facts[user_key]['expired'] == 'True'): - query_fragments.append("password expire") - changed = True - elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): - if expired: - query_fragments.append("password expire") - changed = True - else: - raise NotSupportedError("Unexpiring user password is not supported.") - if profile and profile != user_facts[user_key]['profile']: - query_fragments.append("profile {0}".format(profile)) - changed = True - if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: - query_fragments.append("resource pool {0}".format(resource_pool)) - if user_facts[user_key]['resource_pool'] != 'general': - cursor.execute("revoke usage on resource pool {0} from {1}".format( - user_facts[user_key]['resource_pool'], user)) - if resource_pool != 'general': - cursor.execute("grant usage on resource pool {0} to {1}".format( - resource_pool, user)) - changed = True - if changed: - cursor.execute(' '.join(query_fragments)) - if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or - sorted(roles) != sorted(user_facts[user_key]['default_roles'])): - update_roles(user_facts, cursor, user, - user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) - changed = True - if changed: - user_facts.update(get_user_facts(cursor, user)) - return changed - - -def absent(user_facts, cursor, user, roles): - user_key = user.lower() - if user_key in user_facts: - update_roles(user_facts, cursor, user, - user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], []) - try: - cursor.execute("drop user {0}".format(user_facts[user_key]['name'])) - except pyodbc.Error: - raise CannotDropError("Dropping user failed due to dependencies.") - del user_facts[user_key] - return True - else: - return False - -# module logic - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - user=dict(required=True, aliases=['name']), - profile=dict(), - resource_pool=dict(), - password=dict(no_log=True), - expired=dict(type='bool'), - ldap=dict(type='bool'), - roles=dict(aliases=['role']), - state=dict(default='present', choices=['absent', 'present', 'locked']), - db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), - login_password=dict(no_log=True), - ), supports_check_mode=True) - - if not pyodbc_found: - 
module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) - - user = module.params['user'] - profile = module.params['profile'] - if profile: - profile = profile.lower() - resource_pool = module.params['resource_pool'] - if resource_pool: - resource_pool = resource_pool.lower() - password = module.params['password'] - expired = module.params['expired'] - ldap = module.params['ldap'] - roles = [] - if module.params['roles']: - roles = module.params['roles'].split(',') - roles = filter(None, roles) - state = module.params['state'] - if state == 'locked': - locked = True - else: - locked = False - db = '' - if module.params['db']: - db = module.params['db'] - - changed = False - - try: - dsn = ( - "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, - module.params['login_user'], module.params['login_password'], 'true') - db_conn = pyodbc.connect(dsn, autocommit=True) - cursor = db_conn.cursor() - except Exception as e: - module.fail_json(msg="Unable to connect to database: {0}.".format(e)) - - try: - user_facts = get_user_facts(cursor) - if module.check_mode: - changed = not check(user_facts, user, profile, resource_pool, - locked, password, expired, ldap, roles) - elif state == 'absent': - try: - changed = absent(user_facts, cursor, user, roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state in ['present', 'locked']: - try: - changed = present(user_facts, cursor, user, profile, resource_pool, - locked, password, expired, ldap, roles) - except pyodbc.Error as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts}) - except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts}) - except SystemExit: - # avoid catching this on python 2.4 - raise - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/datadog_downtime.py b/ansible_collections/community/general/plugins/modules/datadog_downtime.py deleted file mode 120000 index 46d64dbc..00000000 --- a/ansible_collections/community/general/plugins/modules/datadog_downtime.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/datadog/datadog_downtime.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/datadog_event.py b/ansible_collections/community/general/plugins/modules/datadog_event.py deleted file mode 120000 index dbec06be..00000000 --- a/ansible_collections/community/general/plugins/modules/datadog_event.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/datadog/datadog_event.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/datadog_monitor.py b/ansible_collections/community/general/plugins/modules/datadog_monitor.py deleted file mode 120000 index 2de67778..00000000 --- a/ansible_collections/community/general/plugins/modules/datadog_monitor.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/datadog/datadog_monitor.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/dconf.py 
b/ansible_collections/community/general/plugins/modules/dconf.py deleted file mode 120000 index 0e61967c..00000000 --- a/ansible_collections/community/general/plugins/modules/dconf.py +++ /dev/null @@ -1 +0,0 @@ -system/dconf.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/deploy_helper.py b/ansible_collections/community/general/plugins/modules/deploy_helper.py deleted file mode 120000 index 640c71b1..00000000 --- a/ansible_collections/community/general/plugins/modules/deploy_helper.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/deploy_helper.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/dimensiondata_network.py b/ansible_collections/community/general/plugins/modules/dimensiondata_network.py deleted file mode 120000 index 8b29b00f..00000000 --- a/ansible_collections/community/general/plugins/modules/dimensiondata_network.py +++ /dev/null @@ -1 +0,0 @@ -cloud/dimensiondata/dimensiondata_network.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py b/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py deleted file mode 120000 index de7468e4..00000000 --- a/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py +++ /dev/null @@ -1 +0,0 @@ -cloud/dimensiondata/dimensiondata_vlan.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/discord.py b/ansible_collections/community/general/plugins/modules/discord.py deleted file mode 120000 index 82d1325f..00000000 --- a/ansible_collections/community/general/plugins/modules/discord.py +++ /dev/null @@ -1 +0,0 @@ -notification/discord.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/django_manage.py b/ansible_collections/community/general/plugins/modules/django_manage.py deleted file mode 120000 index 08a3b87d..00000000 --- a/ansible_collections/community/general/plugins/modules/django_manage.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/django_manage.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/dnf_versionlock.py b/ansible_collections/community/general/plugins/modules/dnf_versionlock.py deleted file mode 120000 index 7767677f..00000000 --- a/ansible_collections/community/general/plugins/modules/dnf_versionlock.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/dnf_versionlock.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/dnsimple.py b/ansible_collections/community/general/plugins/modules/dnsimple.py deleted file mode 120000 index 4640f1d3..00000000 --- a/ansible_collections/community/general/plugins/modules/dnsimple.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/dnsimple.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/dnsimple_info.py b/ansible_collections/community/general/plugins/modules/dnsimple_info.py deleted file mode 120000 index 7156ca47..00000000 --- a/ansible_collections/community/general/plugins/modules/dnsimple_info.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/dnsimple_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py b/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py deleted file mode 120000 index f7e91dce..00000000 --- a/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py +++ /dev/null 
@@ -1 +0,0 @@ -net_tools/dnsmadeeasy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/dpkg_divert.py b/ansible_collections/community/general/plugins/modules/dpkg_divert.py deleted file mode 120000 index b7370e9a..00000000 --- a/ansible_collections/community/general/plugins/modules/dpkg_divert.py +++ /dev/null @@ -1 +0,0 @@ -system/dpkg_divert.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/easy_install.py b/ansible_collections/community/general/plugins/modules/easy_install.py deleted file mode 120000 index b3ffe72c..00000000 --- a/ansible_collections/community/general/plugins/modules/easy_install.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/easy_install.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ejabberd_user.py b/ansible_collections/community/general/plugins/modules/ejabberd_user.py deleted file mode 120000 index 55265a16..00000000 --- a/ansible_collections/community/general/plugins/modules/ejabberd_user.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/ejabberd_user.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py b/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py deleted file mode 120000 index a5b1c8d1..00000000 --- a/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py +++ /dev/null @@ -1 +0,0 @@ -database/misc/elasticsearch_plugin.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py b/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py deleted file mode 120000 index 543d9c3a..00000000 --- a/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py +++ /dev/null @@ -1 +0,0 @@ -storage/emc/emc_vnx_sg_member.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/etcd3.py b/ansible_collections/community/general/plugins/modules/etcd3.py deleted file mode 120000 index d9832bc5..00000000 --- a/ansible_collections/community/general/plugins/modules/etcd3.py +++ /dev/null @@ -1 +0,0 @@ -clustering/etcd3.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/facter.py b/ansible_collections/community/general/plugins/modules/facter.py deleted file mode 120000 index 7da9425e..00000000 --- a/ansible_collections/community/general/plugins/modules/facter.py +++ /dev/null @@ -1 +0,0 @@ -system/facter.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/files/archive.py b/ansible_collections/community/general/plugins/modules/files/archive.py deleted file mode 100644 index 91dc6e51..00000000 --- a/ansible_collections/community/general/plugins/modules/files/archive.py +++ /dev/null @@ -1,668 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Ben Doherty -# Sponsored by Oomph, Inc. http://www.oomphinc.com -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: archive -short_description: Creates a compressed archive of one or more files or trees -extends_documentation_fragment: files -description: - - Creates or extends an archive. 
- - The source and archive are on the remote host, and the archive I(is not) copied to the local host. - - Source files can be deleted after archival by specifying I(remove=True). -options: - path: - description: - - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive. - type: list - elements: path - required: true - format: - description: - - The type of compression to use. - - Support for xz was added in Ansible 2.5. - type: str - choices: [ bz2, gz, tar, xz, zip ] - default: gz - dest: - description: - - The file name of the destination archive. The parent directory must exist on the remote host. - - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. - - If the destination archive already exists, it will be truncated and overwritten. - type: path - exclude_path: - description: - - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion. - - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. - type: list - elements: path - default: [] - exclusion_patterns: - description: - - Glob style patterns to exclude files or directories from the resulting archive. - - This differs from I(exclude_path) which applies only to the source paths from I(path). - type: list - elements: path - version_added: 3.2.0 - force_archive: - description: - - Allows you to force the module to treat this as an archive even if only a single file is specified. - - By default when a single file is specified it is compressed only (not archived). - - Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module. - type: bool - default: false - remove: - description: - - Remove any added source files and trees after adding to archive. - type: bool - default: no -notes: - - Requires tarfile, zipfile, gzip and bzip2 packages on target host. - - Requires lzma or backports.lzma if using xz format. - - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
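Per the I(force_archive) option and the notes above, a single input file is compressed in place rather than wrapped in a tar container. A minimal sketch of that compress-only path, using the same gzip-plus-shutil.copyfileobj pairing as the single-target code further down in this diff (the source path is a placeholder):

```python
# Compress-only path for a single source file (no tar container),
# mirroring what the module does when force_archive is false.
# The source path is a placeholder.
import gzip
import shutil

src = "/path/to/foo/single.file"
with open(src, "rb") as f_in:
    with gzip.open(src + ".gz", "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
```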
-seealso: -- module: ansible.builtin.unarchive -author: -- Ben Doherty (@bendoh) -''' - -EXAMPLES = r''' -- name: Compress directory /path/to/foo/ into /path/to/foo.tgz - community.general.archive: - path: /path/to/foo - dest: /path/to/foo.tgz - -- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it - community.general.archive: - path: /path/to/foo - remove: yes - -- name: Create a zip archive of /path/to/foo - community.general.archive: - path: /path/to/foo - format: zip - -- name: Create a bz2 archive of multiple files, rooted at /path - community.general.archive: - path: - - /path/to/foo - - /path/wong/foo - dest: /path/file.tar.bz2 - format: bz2 - -- name: Create a bz2 archive of a globbed path, while excluding specific dirnames - community.general.archive: - path: - - /path/to/foo/* - dest: /path/file.tar.bz2 - exclude_path: - - /path/to/foo/bar - - /path/to/foo/baz - format: bz2 - -- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames - community.general.archive: - path: - - /path/to/foo/* - dest: /path/file.tar.bz2 - exclude_path: - - /path/to/foo/ba* - format: bz2 - -- name: Use gzip to compress a single archive (i.e don't archive it first with tar) - community.general.archive: - path: /path/to/foo/single.file - dest: /path/file.gz - format: gz - -- name: Create a tar.gz archive of a single file. - community.general.archive: - path: /path/to/foo/single.file - dest: /path/file.tar.gz - format: gz - force_archive: true -''' - -RETURN = r''' -state: - description: - The state of the input C(path). - type: str - returned: always -dest_state: - description: - - The state of the I(dest) file. - - C(absent) when the file does not exist. - - C(archive) when the file is an archive. - - C(compress) when the file is compressed, but not an archive. - - C(incomplete) when the file is an archive, but some files under I(path) were not found. - type: str - returned: success - version_added: 3.4.0 -missing: - description: Any files that were missing from the source. - type: list - returned: success -archived: - description: Any files that were compressed or added to the archive. - type: list - returned: success -arcroot: - description: The archive root. - type: str - returned: always -expanded_paths: - description: The list of matching paths from paths argument. - type: list - returned: always -expanded_exclude_paths: - description: The list of matching exclude paths from the exclude_path argument. 
- type: list - returned: always -''' - -import abc -import bz2 -import glob -import gzip -import io -import os -import re -import shutil -import tarfile -import zipfile -from fnmatch import fnmatch -from sys import version_info -from traceback import format_exc -from zlib import crc32 - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_bytes, to_native -from ansible.module_utils import six - - -LZMA_IMP_ERR = None -if six.PY3: - try: - import lzma - HAS_LZMA = True - except ImportError: - LZMA_IMP_ERR = format_exc() - HAS_LZMA = False -else: - try: - from backports import lzma - HAS_LZMA = True - except ImportError: - LZMA_IMP_ERR = format_exc() - HAS_LZMA = False - -PY27 = version_info[0:2] >= (2, 7) - -STATE_ABSENT = 'absent' -STATE_ARCHIVED = 'archive' -STATE_COMPRESSED = 'compress' -STATE_INCOMPLETE = 'incomplete' - - -def common_path(paths): - empty = b'' if paths and isinstance(paths[0], six.binary_type) else '' - - return os.path.join( - os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty - ) - - -def expand_paths(paths): - expanded_path = [] - is_globby = False - for path in paths: - b_path = _to_bytes(path) - if b'*' in b_path or b'?' in b_path: - e_paths = glob.glob(b_path) - is_globby = True - else: - e_paths = [b_path] - expanded_path.extend(e_paths) - return expanded_path, is_globby - - -def matches_exclusion_patterns(path, exclusion_patterns): - return any(fnmatch(path, p) for p in exclusion_patterns) - - -def is_archive(path): - return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) - - -def strip_prefix(prefix, string): - return string[len(prefix):] if string.startswith(prefix) else string - - -def _to_bytes(s): - return to_bytes(s, errors='surrogate_or_strict') - - -def _to_native(s): - return to_native(s, errors='surrogate_or_strict') - - -def _to_native_ascii(s): - return to_native(s, errors='surrogate_or_strict', encoding='ascii') - - -@six.add_metaclass(abc.ABCMeta) -class Archive(object): - def __init__(self, module): - self.module = module - - self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None - self.exclusion_patterns = module.params['exclusion_patterns'] or [] - self.format = module.params['format'] - self.must_archive = module.params['force_archive'] - self.remove = module.params['remove'] - - self.changed = False - self.destination_state = STATE_ABSENT - self.errors = [] - self.file = None - self.successes = [] - self.targets = [] - self.not_found = [] - - paths = module.params['path'] - self.expanded_paths, has_globs = expand_paths(paths) - self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] - - self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths)) - - if not self.paths: - module.fail_json( - path=', '.join(paths), - expanded_paths=_to_native(b', '.join(self.expanded_paths)), - expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)), - msg='Error, no source paths were found' - ) - - self.root = common_path(self.paths) - - if not self.must_archive: - self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) - - if not self.destination and not self.must_archive: - self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format)) - - if self.must_archive and not self.destination: - module.fail_json( - dest=_to_native(self.destination), - path=', 
'.join(paths), - msg='Error, must specify "dest" when archiving multiple files or trees' - ) - - if self.remove: - self._check_removal_safety() - - self.original_checksums = self.destination_checksums() - self.original_size = self.destination_size() - - def add(self, path, archive_name): - try: - self._add(_to_native_ascii(path), _to_native(archive_name)) - if self.contains(_to_native(archive_name)): - self.successes.append(path) - except Exception as e: - self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e))) - - def add_single_target(self, path): - if self.format in ('zip', 'tar'): - self.open() - self.add(path, strip_prefix(self.root, path)) - self.close() - self.destination_state = STATE_ARCHIVED - else: - try: - f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb') - with open(path, 'rb') as f_in: - shutil.copyfileobj(f_in, f_out) - f_out.close() - self.successes.append(path) - self.destination_state = STATE_COMPRESSED - except (IOError, OSError) as e: - self.module.fail_json( - path=_to_native(path), - dest=_to_native(self.destination), - msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc() - ) - - def add_targets(self): - self.open() - try: - for target in self.targets: - if os.path.isdir(target): - for directory_path, directory_names, file_names in os.walk(target, topdown=True): - for directory_name in directory_names: - full_path = os.path.join(directory_path, directory_name) - self.add(full_path, strip_prefix(self.root, full_path)) - - for file_name in file_names: - full_path = os.path.join(directory_path, file_name) - self.add(full_path, strip_prefix(self.root, full_path)) - else: - self.add(target, strip_prefix(self.root, target)) - except Exception as e: - if self.format in ('zip', 'tar'): - archive_format = self.format - else: - archive_format = 'tar.' 
+ self.format - self.module.fail_json( - msg='Error when writing %s archive at %s: %s' % ( - archive_format, _to_native(self.destination), _to_native(e) - ), - exception=format_exc() - ) - self.close() - - if self.errors: - self.module.fail_json( - msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) - ) - - def is_different_from_original(self): - if self.original_checksums is None: - return self.original_size != self.destination_size() - else: - return self.original_checksums != self.destination_checksums() - - def destination_checksums(self): - if self.destination_exists() and self.destination_readable(): - return self._get_checksums(self.destination) - return None - - def destination_exists(self): - return self.destination and os.path.exists(self.destination) - - def destination_readable(self): - return self.destination and os.access(self.destination, os.R_OK) - - def destination_size(self): - return os.path.getsize(self.destination) if self.destination_exists() else 0 - - def find_targets(self): - for path in self.paths: - if not os.path.lexists(path): - self.not_found.append(path) - else: - self.targets.append(path) - - def has_targets(self): - return bool(self.targets) - - def has_unfound_targets(self): - return bool(self.not_found) - - def remove_single_target(self, path): - try: - os.remove(path) - except OSError as e: - self.module.fail_json( - path=_to_native(path), - msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() - ) - - def remove_targets(self): - for path in self.successes: - if os.path.exists(path): - try: - if os.path.isdir(path): - shutil.rmtree(path) - else: - os.remove(path) - except OSError: - self.errors.append(_to_native(path)) - for path in self.paths: - try: - if os.path.isdir(path): - shutil.rmtree(path) - except OSError: - self.errors.append(_to_native(path)) - - if self.errors: - self.module.fail_json( - dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors - ) - - def update_permissions(self): - try: - file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination) - except TypeError: - # The path argument is only supported in Ansible-base 2.10+. Fall back to - # pre-2.10 behavior for older Ansible versions. 
- self.module.params['path'] = self.destination - file_args = self.module.load_file_common_arguments(self.module.params) - - self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) - - @property - def result(self): - return { - 'archived': [_to_native(p) for p in self.successes], - 'dest': _to_native(self.destination), - 'dest_state': self.destination_state, - 'changed': self.changed, - 'arcroot': _to_native(self.root), - 'missing': [_to_native(p) for p in self.not_found], - 'expanded_paths': [_to_native(p) for p in self.expanded_paths], - 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], - } - - def _check_removal_safety(self): - for path in self.paths: - if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')): - self.module.fail_json( - path=b', '.join(self.paths), - msg='Error, created archive can not be contained in source paths when remove=true' - ) - - def _open_compressed_file(self, path, mode): - f = None - if self.format == 'gz': - f = gzip.open(path, mode) - elif self.format == 'bz2': - f = bz2.BZ2File(path, mode) - elif self.format == 'xz': - f = lzma.LZMAFile(path, mode) - else: - self.module.fail_json(msg="%s is not a valid format" % self.format) - - return f - - @abc.abstractmethod - def close(self): - pass - - @abc.abstractmethod - def contains(self, name): - pass - - @abc.abstractmethod - def open(self): - pass - - @abc.abstractmethod - def _add(self, path, archive_name): - pass - - @abc.abstractmethod - def _get_checksums(self, path): - pass - - -class ZipArchive(Archive): - def __init__(self, module): - super(ZipArchive, self).__init__(module) - - def close(self): - self.file.close() - - def contains(self, name): - try: - self.file.getinfo(name) - except KeyError: - return False - return True - - def open(self): - self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) - - def _add(self, path, archive_name): - if not matches_exclusion_patterns(path, self.exclusion_patterns): - self.file.write(path, archive_name) - - def _get_checksums(self, path): - try: - archive = zipfile.ZipFile(_to_native_ascii(path), 'r') - checksums = set((info.filename, info.CRC) for info in archive.infolist()) - archive.close() - except zipfile.BadZipfile: - checksums = set() - return checksums - - -class TarArchive(Archive): - def __init__(self, module): - super(TarArchive, self).__init__(module) - self.fileIO = None - - def close(self): - self.file.close() - if self.format == 'xz': - with lzma.open(_to_native(self.destination), 'wb') as f: - f.write(self.fileIO.getvalue()) - self.fileIO.close() - - def contains(self, name): - try: - self.file.getmember(name) - except KeyError: - return False - return True - - def open(self): - if self.format in ('gz', 'bz2'): - self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format) - # python3 tarfile module allows xz format but for python2 we have to create the tarfile - # in memory and then compress it with lzma. 
- elif self.format == 'xz': - self.fileIO = io.BytesIO() - self.file = tarfile.open(fileobj=self.fileIO, mode='w') - elif self.format == 'tar': - self.file = tarfile.open(_to_native_ascii(self.destination), 'w') - else: - self.module.fail_json(msg="%s is not a valid archive format" % self.format) - - def _add(self, path, archive_name): - def py27_filter(tarinfo): - return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo - - def py26_filter(path): - return matches_exclusion_patterns(path, self.exclusion_patterns) - - if PY27: - self.file.add(path, archive_name, recursive=False, filter=py27_filter) - else: - self.file.add(path, archive_name, recursive=False, exclude=py26_filter) - - def _get_checksums(self, path): - try: - if self.format == 'xz': - with lzma.open(_to_native_ascii(path), 'r') as f: - archive = tarfile.open(fileobj=f) - checksums = set((info.name, info.chksum) for info in archive.getmembers()) - archive.close() - else: - archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format) - checksums = set((info.name, info.chksum) for info in archive.getmembers()) - archive.close() - except (lzma.LZMAError, tarfile.ReadError, tarfile.CompressionError): - try: - # The python implementations of gzip, bz2, and lzma do not support restoring compressed files - # to their original names so only file checksum is returned - f = self._open_compressed_file(_to_native_ascii(path), 'r') - checksums = set([(b'', crc32(f.read()))]) - f.close() - except Exception: - checksums = set() - return checksums - - -def get_archive(module): - if module.params['format'] == 'zip': - return ZipArchive(module) - else: - return TarArchive(module) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='list', elements='path', required=True), - format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), - dest=dict(type='path'), - exclude_path=dict(type='list', elements='path', default=[]), - exclusion_patterns=dict(type='list', elements='path'), - force_archive=dict(type='bool', default=False), - remove=dict(type='bool', default=False), - ), - add_file_common_args=True, - supports_check_mode=True, - ) - - if not HAS_LZMA and module.params['format'] == 'xz': - module.fail_json( - msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR - ) - - check_mode = module.check_mode - - archive = get_archive(module) - archive.find_targets() - - if not archive.has_targets(): - if archive.destination_exists(): - archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED - elif archive.has_targets() and archive.must_archive: - if check_mode: - archive.changed = True - else: - archive.add_targets() - archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED - archive.changed |= archive.is_different_from_original() - if archive.remove: - archive.remove_targets() - else: - if check_mode: - if not archive.destination_exists(): - archive.changed = True - else: - path = archive.paths[0] - archive.add_single_target(path) - archive.changed |= archive.is_different_from_original() - if archive.remove: - archive.remove_single_target(path) - - if archive.destination_exists(): - archive.update_permissions() - - module.exit_json(**archive.result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/files/filesize.py 
b/ansible_collections/community/general/plugins/modules/files/filesize.py deleted file mode 100644 index 83edbe58..00000000 --- a/ansible_collections/community/general/plugins/modules/files/filesize.py +++ /dev/null @@ -1,487 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, quidame -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: filesize - -short_description: Create a file with a given size, or resize it if it exists - -description: - - This module is a simple wrapper around C(dd) to create, extend or truncate - a file, given its size. It can be used to manage swap files (that require - contiguous blocks) or alternatively, huge sparse files. - -author: - - quidame (@quidame) - -version_added: "3.0.0" - -options: - path: - description: - - Path of the regular file to create or resize. - type: path - required: true - size: - description: - - Requested size of the file. - - The value is a number (either C(int) or C(float)) optionally followed - by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or - C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB), - and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of - C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB); - C(G), C(g) or C(GiB) (= 1024MiB); and so on. - - If the multiplicative suffix is not provided, the value is treated as - an integer number of blocks of I(blocksize) bytes each (float values - are rounded to the closest integer). - - When the I(size) value is equal to the current file size, does nothing. - - When the I(size) value is bigger than the current file size, bytes from - I(source) (if I(sparse) is not C(false)) are appended to the file - without truncating it, in other words, without modifying the existing - bytes of the file. - - When the I(size) value is smaller than the current file size, it is - truncated to the requested value without modifying bytes before this - value. - - That means that a file of any arbitrary size can be grown to any other - arbitrary size, and then resized down to its initial size without - modifying its initial content. - type: raw - required: true - blocksize: - description: - - Size of blocks, in bytes if not followed by a multiplicative suffix. - - The numeric value (before the unit) C(MUST) be an integer (or a C(float) - if it equals an integer). - - If not set, the size of blocks is guessed from the OS and commonly - results in C(512) or C(4096) bytes, that is used internally by the - module or when I(size) has no unit. - type: raw - source: - description: - - Device or file that provides input data to provision the file. - - This parameter is ignored when I(sparse=true). - type: path - default: /dev/zero - force: - description: - - Whether or not to overwrite the file if it exists, in other words, to - truncate it from 0. When C(true), the module is not idempotent, that - means it always reports I(changed=true). - - I(force=true) and I(sparse=true) are mutually exclusive. - type: bool - default: false - sparse: - description: - - Whether or not the file to create should be a sparse file. - - This option is effective only on newly created files, or when growing a - file, only for the bytes to append. - - This option is not supported on OSes or filesystems not supporting sparse files. 
- - I(force=true) and I(sparse=true) are mutually exclusive. - type: bool - default: false - unsafe_writes: - description: - - This option is silently ignored. This module always modifies file - size in-place. - -notes: - - This module supports C(check_mode) and C(diff). - -requirements: - - dd (Data Duplicator) in PATH - -extends_documentation_fragment: - - ansible.builtin.files - -seealso: - - name: dd(1) manpage for Linux - description: Manual page of the GNU/Linux's dd implementation (from GNU coreutils). - link: https://man7.org/linux/man-pages/man1/dd.1.html - - - name: dd(1) manpage for IBM AIX - description: Manual page of the IBM AIX's dd implementation. - link: https://www.ibm.com/support/knowledgecenter/ssw_aix_72/d_commands/dd.html - - - name: dd(1) manpage for Mac OSX - description: Manual page of the Mac OSX's dd implementation. - link: https://www.unix.com/man-page/osx/1/dd/ - - - name: dd(1M) manpage for Solaris - description: Manual page of the Oracle Solaris's dd implementation. - link: https://docs.oracle.com/cd/E36784_01/html/E36871/dd-1m.html - - - name: dd(1) manpage for FreeBSD - description: Manual page of the FreeBSD's dd implementation. - link: https://www.freebsd.org/cgi/man.cgi?dd(1) - - - name: dd(1) manpage for OpenBSD - description: Manual page of the OpenBSD's dd implementation. - link: https://man.openbsd.org/dd - - - name: dd(1) manpage for NetBSD - description: Manual page of the NetBSD's dd implementation. - link: https://man.netbsd.org/dd.1 - - - name: busybox(1) manpage for Linux - description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation. - link: https://www.unix.com/man-page/linux/1/busybox -''' - -EXAMPLES = r''' -- name: Create a file of 1G filled with null bytes - community.general.filesize: - path: /var/bigfile - size: 1G - -- name: Extend the file to 2G (2*1024^3) - community.general.filesize: - path: /var/bigfile - size: 2G - -- name: Reduce the file to 2GB (2*1000^3) - community.general.filesize: - path: /var/bigfile - size: 2GB - -- name: Fill a file with random bytes for backing a LUKS device - community.general.filesize: - path: ~/diskimage.luks - size: 512.0 MiB - source: /dev/urandom - -- name: Take a backup of MBR boot code into a file, overwriting it if it exists - community.general.filesize: - path: /media/sdb1/mbr.bin - size: 440B - source: /dev/sda - force: true - -- name: Create/resize a sparse file of/to 8TB - community.general.filesize: - path: /var/local/sparsefile - size: 8TB - sparse: true - -- name: Create a file with specific size and attributes, to be used as swap space - community.general.filesize: - path: /var/swapfile - size: 2G - blocksize: 512B - mode: u=rw,go= - owner: root - group: root -''' - -RETURN = r''' -cmd: - description: Command executed to create or resize the file. - type: str - returned: when changed or failed - sample: /usr/bin/dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024 - -filesize: - description: Dictionary of sizes related to the file. - type: dict - returned: always - contains: - blocks: - description: Number of blocks in the file. - type: int - sample: 500 - blocksize: - description: Size of the blocks in bytes. - type: int - sample: 1024 - bytes: - description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize). - type: int - sample: 512000 - iec: - description: Size of the file, in human-readable format, following IEC standard. 
- type: str - sample: 500.0 KiB - si: - description: Size of the file, in human-readable format, following SI standard. - type: str - sample: 512.0 kB - -size_diff: - description: Difference (positive or negative) between old size and new size, in bytes. - type: int - sample: -1234567890 - returned: always - -path: - description: Realpath of the file if it is a symlink, otherwise the same than module's param. - type: str - sample: /var/swap0 - returned: always -''' - - -import re -import os -import math - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -# These are the multiplicative suffixes understood (or returned) by dd and -# others (ls, df, lvresize, lsblk...). -SIZE_UNITS = dict( - B=1, - kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1, - MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2, - GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3, - TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4, - PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5, - EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6, - ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7, - YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8, -) - - -def bytes_to_human(size, iec=False): - """Return human-readable size (with SI or IEC suffix) from bytes. This is - only to populate the returned result of the module, not to handle the - file itself (we only rely on bytes for that). - """ - unit = 'B' - for (u, v) in SIZE_UNITS.items(): - if size < v: - continue - if iec: - if 'i' not in u or size / v >= 1024: - continue - else: - if v % 5 or size / v >= 1000: - continue - unit = u - - hsize = round(size / SIZE_UNITS[unit], 2) - if unit == 'B': - hsize = int(hsize) - - unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit) - if unit == 'KB': - unit = 'kB' - - return '%s %s' % (str(hsize), unit) - - -def smart_blocksize(size, unit, product, bsize): - """Ensure the total size can be written as blocks*blocksize, with blocks - and blocksize being integers. - """ - if not product % bsize: - return bsize - - # Basically, for a file of 8kB (=8000B), system's block size of 4096 bytes - # is not usable. The smallest integer number of kB to work with 512B blocks - # is 64, the nexts are 128, 192, 256, and so on. - - unit_size = SIZE_UNITS[unit] - - if size == int(size): - if unit_size > SIZE_UNITS['MiB']: - if unit_size % 5: - return SIZE_UNITS['MiB'] - return SIZE_UNITS['MB'] - return unit_size - - if unit == 'B': - raise AssertionError("byte is the smallest unit and requires an integer value") - - if 0 < product < bsize: - return product - - for bsz in (1024, 1000, 512, 256, 128, 100, 64, 32, 16, 10, 8, 4, 2): - if not product % bsz: - return bsz - return 1 - - -def split_size_unit(string, isint=False): - """Split a string between the size value (int or float) and the unit. - Support optional space(s) between the numeric value and the unit. - """ - unit = re.sub(r'(\d|\.)', r'', string).strip() - value = float(re.sub(r'%s' % unit, r'', string).strip()) - if isint and unit in ('B', ''): - if int(value) != value: - raise AssertionError("invalid blocksize value: bytes require an integer value") - - if not unit: - unit = None - product = int(round(value)) - else: - if unit not in SIZE_UNITS.keys(): - raise AssertionError("invalid size unit (%s): unit must be one of %s, or none." 
% - (unit, ', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get)))) - product = int(round(value * SIZE_UNITS[unit])) - return value, unit, product - - -def size_string(value): - """Convert a raw value to a string, but only if it is an integer, a float - or a string itself. - """ - if not isinstance(value, (int, float, str)): - raise AssertionError("invalid value type (%s): size must be integer, float or string" % type(value)) - return str(value) - - -def size_spec(args): - """Return a dictionary with size specifications, especially the size in - bytes (after rounding it to an integer number of blocks). - """ - blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2] - if blocksize_in_bytes == 0: - raise AssertionError("block size cannot be equal to zero") - - size_value, size_unit, size_result = split_size_unit(args['size']) - if not size_unit: - blocks = int(math.ceil(size_value)) - else: - blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes) - blocks = int(math.ceil(size_result / blocksize_in_bytes)) - - args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes) - args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes, - iec=bytes_to_human(round_bytes, True), - si=bytes_to_human(round_bytes)) - return args['size_spec'] - - -def current_size(args): - """Return the size of the file at the given location if it exists, or None.""" - path = args['path'] - if os.path.exists(path): - if not os.path.isfile(path): - raise AssertionError("%s exists but is not a regular file" % path) - args['file_size'] = os.stat(path).st_size - else: - args['file_size'] = None - return args['file_size'] - - -def complete_dd_cmdline(args, dd_cmd): - """Compute dd options to grow or truncate a file.""" - if args['file_size'] == args['size_spec']['bytes'] and not args['force']: - # Nothing to do. - return list() - - bs = args['size_spec']['blocksize'] - - # For sparse files (create, truncate, grow): write count=0 block. 
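# Worked example of the seek/count arithmetic in the branches below
# (illustrative numbers assumed): growing a non-sparse file from 1 MiB to
# 3 MiB with bs=4096 keeps the existing blocks and appends only the tail:
#   seek  = 1048576 // 4096         -> 256  (blocks already present, skipped)
#   count = 3145728 // 4096 - seek  -> 512  (new blocks actually written)
# giving: dd if=/dev/zero of=FILE bs=4096 seek=256 count=512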
- if args['sparse']: - seek = args['size_spec']['blocks'] - elif args['force'] or not os.path.exists(args['path']): # Create file - seek = 0 - elif args['size_diff'] < 0: # Truncate file - seek = args['size_spec']['blocks'] - elif args['size_diff'] % bs: # Grow file - seek = int(args['file_size'] / bs) + 1 - else: - seek = int(args['file_size'] / bs) - - count = args['size_spec']['blocks'] - seek - dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)] - - return dd_cmd - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True), - size=dict(type='raw', required=True), - blocksize=dict(type='raw'), - source=dict(type='path', default='/dev/zero'), - sparse=dict(type='bool', default=False), - force=dict(type='bool', default=False), - ), - supports_check_mode=True, - add_file_common_args=True, - ) - args = dict(**module.params) - diff = dict(before=dict(), after=dict()) - - if args['sparse'] and args['force']: - module.fail_json(msg='parameters values are mutually exclusive: force=true|sparse=true') - if not os.path.exists(os.path.dirname(args['path'])): - module.fail_json(msg='parent directory of the file must exist prior to run this module') - if not args['blocksize']: - args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize) - - try: - args['size'] = size_string(args['size']) - args['blocksize'] = size_string(args['blocksize']) - initial_filesize = current_size(args) - size_descriptors = size_spec(args) - except AssertionError as err: - module.fail_json(msg=to_native(err)) - - expected_filesize = size_descriptors['bytes'] - if initial_filesize: - args['size_diff'] = expected_filesize - initial_filesize - diff['after']['size'] = expected_filesize - diff['before']['size'] = initial_filesize - - result = dict( - changed=args['force'], - size_diff=args['size_diff'], - path=args['path'], - filesize=size_descriptors) - - dd_bin = module.get_bin_path('dd', True) - dd_cmd = [dd_bin, 'if=%s' % args['source'], 'of=%s' % args['path']] - - if expected_filesize != initial_filesize or args['force']: - result['cmd'] = ' '.join(complete_dd_cmdline(args, dd_cmd)) - if module.check_mode: - result['changed'] = True - else: - result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd) - - diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args) - if initial_filesize: - result['size_diff'] = result_filesize - initial_filesize - if not args['force']: - result['changed'] = result_filesize != initial_filesize - - if result['rc']: - msg = "dd error while creating file %s with size %s from source %s: see stderr for details" % ( - args['path'], args['size'], args['source']) - module.fail_json(msg=msg, **result) - if result_filesize != expected_filesize: - msg = "module error while creating file %s with size %s from source %s: file is %s bytes long" % ( - args['path'], args['size'], args['source'], result_filesize) - module.fail_json(msg=msg, **result) - - # dd follows symlinks, and so does this module, while file module doesn't. - # If we call it, this is to manage file's mode, owner and so on, not the - # symlink's ones. 
- file_params = dict(**module.params) - if os.path.islink(args['path']): - file_params['path'] = result['path'] = os.path.realpath(args['path']) - - if args['file_size'] is not None: - file_args = module.load_file_common_arguments(file_params) - result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) - result['diff'] = diff - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/files/ini_file.py b/ansible_collections/community/general/plugins/modules/files/ini_file.py deleted file mode 100644 index 79d373f3..00000000 --- a/ansible_collections/community/general/plugins/modules/files/ini_file.py +++ /dev/null @@ -1,483 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Jan-Piet Mens -# Copyright: (c) 2015, Ales Nosek -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: ini_file -short_description: Tweak settings in INI files -extends_documentation_fragment: files -description: - - Manage (add, remove, change) individual settings in an INI-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). - - Adds missing sections if they don't exist. - - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. - - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when - no other modifications need to be applied. -options: - path: - description: - - Path to the INI-style file; this file is created if required. - - Before Ansible 2.3 this option was only usable as I(dest). - type: path - required: true - aliases: [ dest ] - section: - description: - - Section name in INI file. This is added if C(state=present) automatically when - a single value is being set. - - If left empty or set to C(null), the I(option) will be placed before the first I(section). - - Using C(null) is also required if the config format does not support sections. - type: str - required: true - option: - description: - - If set (required for changing a I(value)), this is the name of the option. - - May be omitted if adding/removing a whole I(section). - type: str - value: - description: - - The string value to be associated with an I(option). - - May be omitted when removing an I(option). - - Mutually exclusive with I(values). - - I(value=v) is equivalent to I(values=[v]). - type: str - values: - description: - - The string value to be associated with an I(option). - - May be omitted when removing an I(option). - - Mutually exclusive with I(value). - - I(value=v) is equivalent to I(values=[v]). - type: list - elements: str - version_added: 3.6.0 - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - type: bool - default: no - state: - description: - - If set to C(absent) and I(exclusive) set to C(yes) all matching I(option) lines are removed. - - If set to C(absent) and I(exclusive) set to C(no) the specified C(option=value) lines are removed, - but the other I(option)s with the same name are not touched. 
- - If set to C(present) and I(exclusive) set to C(no) the specified C(option=values) lines are added, - but the other I(option)s with the same name are not touched. - - If set to C(present) and I(exclusive) set to C(yes) all given C(option=values) lines will be - added and the other I(option)s with the same name are removed. - type: str - choices: [ absent, present ] - default: present - exclusive: - description: - - If set to C(yes) (default), all matching I(option) lines are removed when I(state=absent), - or replaced when I(state=present). - - If set to C(no), only the specified I(value(s)) are added when I(state=present), - or removed when I(state=absent), and existing ones are not modified. - type: bool - default: yes - version_added: 3.6.0 - no_extra_spaces: - description: - - Do not insert spaces before and after '=' symbol. - type: bool - default: no - create: - description: - - If set to C(no), the module will fail if the file does not already exist. - - By default it will create the file if it is missing. - type: bool - default: yes - allow_no_value: - description: - - Allow option without value and without '=' symbol. - type: bool - default: no -notes: - - While it is possible to add an I(option) without specifying a I(value), this makes no sense. - - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. - - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. -author: - - Jan-Piet Mens (@jpmens) - - Ales Nosek (@noseka1) -''' - -EXAMPLES = r''' -# Before Ansible 2.3, option 'dest' was used instead of 'path' -- name: Ensure "fav=lemonade" is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: fav - value: lemonade - mode: '0600' - backup: yes - -- name: Ensure "temperature=cold" is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/anotherconf - section: drinks - option: temperature - value: cold - backup: yes - -- name: Ensure "beverage=lemon juice" is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: beverage - value: lemon juice - mode: '0600' - state: present - exclusive: no - -- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: beverage - values: - - coke - - pepsi - mode: '0600' - state: present -''' - -import io -import os -import re -import tempfile -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_text - - -def match_opt(option, line): - option = re.escape(option) - return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) - - -def match_active_opt(option, line): - option = re.escape(option) - return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) - - -def update_section_line(changed, section_lines, index, changed_lines, newline, msg): - option_changed = section_lines[index] != newline - changed = changed or option_changed - if option_changed: - msg = 'option changed' - section_lines[index] = newline - changed_lines[index] = 1 - return (changed, msg) - - -def do_ini(module, filename, section=None, option=None, values=None, - state='present', exclusive=True, backup=False, no_extra_spaces=False, - create=True, allow_no_value=False): - - if section is not None: - section =
to_text(section) - if option is not None: - option = to_text(option) - - # deduplicate entries in values - values_unique = [] - [values_unique.append(to_text(value)) for value in values if value not in values_unique and value is not None] - values = values_unique - - diff = dict( - before='', - after='', - before_header='%s (content)' % filename, - after_header='%s (content)' % filename, - ) - - if not os.path.exists(filename): - if not create: - module.fail_json(rc=257, msg='Destination %s does not exist!' % filename) - destpath = os.path.dirname(filename) - if not os.path.exists(destpath) and not module.check_mode: - os.makedirs(destpath) - ini_lines = [] - else: - with io.open(filename, 'r', encoding="utf-8-sig") as ini_file: - ini_lines = [to_text(line) for line in ini_file.readlines()] - - if module._diff: - diff['before'] = u''.join(ini_lines) - - changed = False - - # ini file could be empty - if not ini_lines: - ini_lines.append(u'\n') - - # last line of file may not contain a trailing newline - if ini_lines[-1] == u"" or ini_lines[-1][-1] != u'\n': - ini_lines[-1] += u'\n' - changed = True - - # append fake section lines to simplify the logic - # At top: - # Fake random section to do not match any other in the file - # Using commit hash as fake section name - fake_section_name = u"ad01e11446efb704fcdbdb21f2c43757423d91c5" - - # Insert it at the beginning - ini_lines.insert(0, u'[%s]' % fake_section_name) - - # At bottom: - ini_lines.append(u'[') - - # If no section is defined, fake section is used - if not section: - section = fake_section_name - - within_section = not section - section_start = section_end = 0 - msg = 'OK' - if no_extra_spaces: - assignment_format = u'%s=%s\n' - else: - assignment_format = u'%s = %s\n' - - option_no_value_present = False - - non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) - - before = after = [] - section_lines = [] - - for index, line in enumerate(ini_lines): - # find start and end of section - if line.startswith(u'[%s]' % section): - within_section = True - section_start = index - elif line.startswith(u'['): - if within_section: - section_end = index - break - - before = ini_lines[0:section_start] - section_lines = ini_lines[section_start:section_end] - after = ini_lines[section_end:len(ini_lines)] - - # Keep track of changed section_lines - changed_lines = [0] * len(section_lines) - - # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex - # - # 1. edit all lines where we have a option=value pair with a matching value in values[] - # 2. edit all the remaing lines where we have a matching option - # 3. delete remaining lines where we have a matching option - # 4. 
insert missing option line(s) at the end of the section - - if state == 'present' and option: - for index, line in enumerate(section_lines): - if match_opt(option, line): - match = match_opt(option, line) - if values and match.group(6) in values: - matched_value = match.group(6) - if not matched_value and allow_no_value: - # replace existing option with no value line(s) - newline = u'%s\n' % option - option_no_value_present = True - else: - # replace existing option=value line(s) - newline = assignment_format % (option, matched_value) - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - values.remove(matched_value) - elif not values and allow_no_value: - # replace existing option with no value line(s) - newline = u'%s\n' % option - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - option_no_value_present = True - break - - if state == 'present' and exclusive and not allow_no_value: - # override option with no value to option with value if not allow_no_value - if len(values) > 0: - for index, line in enumerate(section_lines): - if not changed_lines[index] and match_active_opt(option, section_lines[index]): - newline = assignment_format % (option, values.pop(0)) - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - if len(values) == 0: - break - # remove all remaining option occurrences from the rest of the section - for index in range(len(section_lines) - 1, 0, -1): - if not changed_lines[index] and match_active_opt(option, section_lines[index]): - del section_lines[index] - del changed_lines[index] - changed = True - msg = 'option changed' - - if state == 'present': - # insert missing option line(s) at the end of the section - for index in range(len(section_lines), 0, -1): - # search backwards for previous non-blank or non-comment line - if not non_blank_non_comment_pattern.match(section_lines[index - 1]): - if option and values: - # insert option line(s) - for element in values[::-1]: - # items are added backwards, so traverse the list backwards to not confuse the user - # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯ - if element is not None: - # insert option=value line - section_lines.insert(index, assignment_format % (option, element)) - msg = 'option added' - changed = True - elif element is None and allow_no_value: - # insert option with no value line - section_lines.insert(index, u'%s\n' % option) - msg = 'option added' - changed = True - elif option and not values and allow_no_value and not option_no_value_present: - # insert option with no value line(s) - section_lines.insert(index, u'%s\n' % option) - msg = 'option added' - changed = True - break - - if state == 'absent': - if option: - if exclusive: - # delete all option line(s) with given option and ignore value - new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))] - if section_lines != new_section_lines: - changed = True - msg = 'option changed' - section_lines = new_section_lines - elif not exclusive and len(values) > 0: - # delete specified option=value line(s) - new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)] - if section_lines != new_section_lines: - changed = True - msg = 'option changed' - section_lines = new_section_lines - else: - # drop the entire section - if section_lines: - section_lines = [] - 
msg = 'section removed' - changed = True - - # reassemble the ini_lines after manipulation - ini_lines = before + section_lines + after - - # remove the fake section line - del ini_lines[0] - del ini_lines[-1:] - - if not within_section and state == 'present': - ini_lines.append(u'[%s]\n' % section) - msg = 'section and option added' - if option and values: - for value in values: - ini_lines.append(assignment_format % (option, value)) - elif option and not values and allow_no_value: - ini_lines.append(u'%s\n' % option) - else: - msg = 'only section added' - changed = True - - if module._diff: - diff['after'] = u''.join(ini_lines) - - backup_file = None - if changed and not module.check_mode: - if backup: - backup_file = module.backup_local(filename) - - encoded_ini_lines = [to_bytes(line) for line in ini_lines] - try: - tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) - f = os.fdopen(tmpfd, 'wb') - f.writelines(encoded_ini_lines) - f.close() - except IOError: - module.fail_json(msg="Unable to create temporary file", traceback=traceback.format_exc()) - - try: - module.atomic_move(tmpfile, filename) - except IOError: - module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc()) - - return (changed, backup_file, diff, msg) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True, aliases=['dest']), - section=dict(type='str', required=True), - option=dict(type='str'), - value=dict(type='str'), - values=dict(type='list', elements='str'), - backup=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), - exclusive=dict(type='bool', default=True), - no_extra_spaces=dict(type='bool', default=False), - allow_no_value=dict(type='bool', default=False), - create=dict(type='bool', default=True) - ), - mutually_exclusive=[ - ['value', 'values'] - ], - add_file_common_args=True, - supports_check_mode=True, - ) - - path = module.params['path'] - section = module.params['section'] - option = module.params['option'] - value = module.params['value'] - values = module.params['values'] - state = module.params['state'] - exclusive = module.params['exclusive'] - backup = module.params['backup'] - no_extra_spaces = module.params['no_extra_spaces'] - allow_no_value = module.params['allow_no_value'] - create = module.params['create'] - - if state == 'present' and not allow_no_value and value is None and not values: - module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") - - if value is not None: - values = [value] - elif values is None: - values = [] - - (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value) - - if not module.check_mode and os.path.exists(path): - file_args = module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) - - results = dict( - changed=changed, - diff=diff, - msg=msg, - path=path, - ) - if backup_file is not None: - results['backup_file'] = backup_file - - # Mission complete - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/files/iso_create.py b/ansible_collections/community/general/plugins/modules/files/iso_create.py deleted file mode 100644 index 3fa45633..00000000 ---
a/ansible_collections/community/general/plugins/modules/files/iso_create.py +++ /dev/null @@ -1,295 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Ansible Project -# Copyright: (c) 2020, VMware, Inc. All Rights Reserved. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: iso_create -short_description: Generate ISO file with specified files or folders -description: - - This module is used to generate ISO file with specified path of files. -author: - - Diane Wang (@Tomorrow9) -requirements: -- "pycdlib" -- "python >= 2.7" -version_added: '0.2.0' - -options: - src_files: - description: - - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file. - - Will fail if specified file or folder in C(src_files) does not exist on local machine. - - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and - underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path - names are limited to 255 characters.' - type: list - required: yes - elements: path - dest_iso: - description: - - The absolute path with file name of the new generated ISO file on local machine. - - Will create intermediate folders when they does not exist. - type: path - required: yes - interchange_level: - description: - - The ISO9660 interchange level to use, it dictates the rules on the names of files. - - Levels and valid values C(1), C(2), C(3), C(4) are supported. - - The default value is level C(1), which is the most conservative, level C(3) is recommended. - - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension. - type: int - default: 1 - choices: [1, 2, 3, 4] - vol_ident: - description: - - The volume identification string to use on the new generated ISO image. - type: str - rock_ridge: - description: - - Whether to make this ISO have the Rock Ridge extensions or not. - - Valid values are C(1.09), C(1.10) or C(1.12), means adding the specified Rock Ridge version to the ISO. - - If unsure, set C(1.09) to ensure maximum compatibility. - - If not specified, then not add Rock Ridge extension to the ISO. - type: str - choices: ['1.09', '1.10', '1.12'] - joliet: - description: - - Support levels and valid values are C(1), C(2), or C(3). - - Level C(3) is by far the most common. - - If not specified, then no Joliet support is added. - type: int - choices: [1, 2, 3] - udf: - description: - - Whether to add UDF support to this ISO. - - If set to C(True), then version 2.60 of the UDF spec is used. - - If not specified or set to C(False), then no UDF support is added. 
- type: bool - default: False -''' - -EXAMPLES = r''' -- name: Create an ISO file - community.general.iso_create: - src_files: - - /root/testfile.yml - - /root/testfolder - dest_iso: /tmp/test.iso - interchange_level: 3 - -- name: Create an ISO file with Rock Ridge extension - community.general.iso_create: - src_files: - - /root/testfile.yml - - /root/testfolder - dest_iso: /tmp/test.iso - rock_ridge: 1.09 - -- name: Create an ISO file with Joliet support - community.general.iso_create: - src_files: - - ./windows_config/Autounattend.xml - dest_iso: ./test.iso - interchange_level: 3 - joliet: 3 - vol_ident: WIN_AUTOINSTALL -''' - -RETURN = r''' -source_file: - description: Configured source files or directories list. - returned: on success - type: list - elements: path - sample: ["/path/to/file.txt", "/path/to/folder"] -created_iso: - description: Created iso file path. - returned: on success - type: str - sample: "/path/to/test.iso" -interchange_level: - description: Configured interchange level. - returned: on success - type: int - sample: 3 -vol_ident: - description: Configured volume identification string. - returned: on success - type: str - sample: "OEMDRV" -joliet: - description: Configured Joliet support level. - returned: on success - type: int - sample: 3 -rock_ridge: - description: Configured Rock Ridge version. - returned: on success - type: str - sample: "1.09" -udf: - description: Configured UDF support. - returned: on success - type: bool - sample: False -''' - -import os -import traceback - -PYCDLIB_IMP_ERR = None -try: - import pycdlib - HAS_PYCDLIB = True -except ImportError: - PYCDLIB_IMP_ERR = traceback.format_exc() - HAS_PYCDLIB = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None): - rr_name = None - joliet_path = None - udf_path = None - # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot, - # followed by a maximum 3 character extension, followed by a semicolon and a version - file_name = os.path.basename(file_path) - if '.' 
not in file_name: - file_in_iso_path = file_path.upper() + '.;1' - else: - file_in_iso_path = file_path.upper() + ';1' - if rock_ridge: - rr_name = file_name - if use_joliet: - joliet_path = file_path - if use_udf: - udf_path = file_path - try: - iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path) - except Exception as err: - module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err))) - - -def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None): - rr_name = None - joliet_path = None - udf_path = None - iso_dir_path = dir_path.upper() - if rock_ridge: - rr_name = os.path.basename(dir_path) - if use_joliet: - joliet_path = iso_dir_path - if use_udf: - udf_path = iso_dir_path - try: - iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path) - except Exception as err: - module.fail_json(msg="Failed to add directory %s to ISO file due to %s" % (dir_path, to_native(err))) - - -def main(): - argument_spec = dict( - src_files=dict(type='list', required=True, elements='path'), - dest_iso=dict(type='path', required=True), - interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1), - vol_ident=dict(type='str'), - rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']), - joliet=dict(type='int', choices=[1, 2, 3]), - udf=dict(type='bool', default=False), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - if not HAS_PYCDLIB: - module.fail_json(msg=missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR) - - src_file_list = module.params.get('src_files') - if not src_file_list: - module.fail_json(msg='Please specify source file and/or directory list using src_files parameter.') - for src_file in src_file_list: - if not os.path.exists(src_file): - module.fail_json(msg="Specified source file/directory path does not exist on local machine, %s" % src_file) - - dest_iso = module.params.get('dest_iso') - if not dest_iso: - module.fail_json(msg='Please specify the absolute path of the newly created ISO file using dest_iso parameter.') - - dest_iso_dir = os.path.dirname(dest_iso) - if dest_iso_dir and not os.path.exists(dest_iso_dir): - # will create intermediate dir for new ISO file - try: - os.makedirs(dest_iso_dir) - except OSError as err: - module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err))) - - volume_id = module.params.get('vol_ident') - if volume_id is None: - volume_id = '' - inter_level = module.params.get('interchange_level') - rock_ridge = module.params.get('rock_ridge') - use_joliet = module.params.get('joliet') - use_udf = None - if module.params['udf']: - use_udf = '2.60' - - result = dict( - changed=False, - source_file=src_file_list, - created_iso=dest_iso, - interchange_level=inter_level, - vol_ident=volume_id, - rock_ridge=rock_ridge, - joliet=use_joliet, - udf=use_udf - ) - if not module.check_mode: - iso_file = pycdlib.PyCdlib() - iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf) - - for src_file in src_file_list: - # if a directory is specified, walk it to add its files and subdirectories - if os.path.isdir(src_file): - dir_list = [] - file_list = [] - src_file = src_file.rstrip('/') - dir_name = os.path.basename(src_file) - add_directory(module, iso_file=iso_file, dir_path='/' +
dir_name, rock_ridge=rock_ridge, - use_joliet=use_joliet, use_udf=use_udf) - - # get dir list and file list - for path, dirs, files in os.walk(src_file): - for filename in files: - file_list.append(os.path.join(path, filename)) - for dir in dirs: - dir_list.append(os.path.join(path, dir)) - for new_dir in dir_list: - add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1], - rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) - for new_file in file_list: - add_file(module, iso_file=iso_file, src_file=new_file, - file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge, - use_joliet=use_joliet, use_udf=use_udf) - # if specify a file then add this file directly to the '/' path in ISO - else: - add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file), - rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) - - iso_file.write(dest_iso) - iso_file.close() - - result['changed'] = True - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/files/iso_extract.py b/ansible_collections/community/general/plugins/modules/files/iso_extract.py deleted file mode 100644 index 81fe6b66..00000000 --- a/ansible_collections/community/general/plugins/modules/files/iso_extract.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Jeroen Hoekx -# Copyright: (c) 2016, Matt Robinson -# Copyright: (c) 2017, Dag Wieers -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: -- Jeroen Hoekx (@jhoekx) -- Matt Robinson (@ribbons) -- Dag Wieers (@dagwieers) -module: iso_extract -short_description: Extract files from an ISO image -description: -- This module has two possible ways of operation. -- If 7zip is installed on the system, this module extracts files from an ISO - into a temporary directory and copies files to a given destination, - if needed. -- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module - mounts the ISO image to a temporary location, and copies files to a given - destination, if needed. -requirements: -- Either 7z (from I(7zip) or I(p7zip) package) -- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux) -options: - image: - description: - - The ISO image to extract files from. - type: path - required: yes - aliases: [ path, src ] - dest: - description: - - The destination directory to extract files to. - type: path - required: yes - files: - description: - - A list of files to extract from the image. - - Extracting directories does not work. - type: list - elements: str - required: yes - force: - description: - - If C(yes), which will replace the remote file when contents are different than the source. - - If C(no), the file will only be extracted and copied if the destination does not already exist. - type: bool - default: yes - executable: - description: - - The path to the C(7z) executable to use for extracting files from the ISO. - - If not provided, it will assume the value C(7z). - type: path -notes: -- Only the file checksum (content) is taken into account when extracting files - from the ISO image. If C(force=no), only checks the presence of the file. 
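For intuition, the I(force) behaviour described in these notes reduces to a checksum compare before each copy. A minimal Python sketch under the same assumptions (SHA-1 on both sides, copy only on mismatch; the paths are hypothetical), not the module's literal code:

import hashlib
import shutil

def sha1sum(path, chunk=65536):
    # stream the file so large extracted payloads are not read into memory at once
    digest = hashlib.sha1()
    with open(path, 'rb') as fh:
        for block in iter(lambda: fh.read(chunk), b''):
            digest.update(block)
    return digest.hexdigest()

# force=yes semantics: replace the destination whenever the content differs
if sha1sum('/tmp/extract/kernel') != sha1sum('/tmp/virt-rear/kernel'):
    shutil.copy('/tmp/extract/kernel', '/tmp/virt-rear/kernel')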
-- In Ansible 2.3 this module was using C(mount) and C(umount) commands only, - requiring root access. This is no longer needed with the introduction of 7zip - for extraction. -''' - -EXAMPLES = r''' -- name: Extract kernel and ramdisk from a LiveCD - community.general.iso_extract: - image: /tmp/rear-test.iso - dest: /tmp/virt-rear/ - files: - - isolinux/kernel - - isolinux/initrd.cgz -''' - -RETURN = r''' -# -''' - -import os.path -import shutil -import tempfile - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - image=dict(type='path', required=True, aliases=['path', 'src']), - dest=dict(type='path', required=True), - files=dict(type='list', elements='str', required=True), - force=dict(type='bool', default=True), - executable=dict(type='path'), # No default on purpose - ), - supports_check_mode=True, - ) - image = module.params['image'] - dest = module.params['dest'] - files = module.params['files'] - force = module.params['force'] - executable = module.params['executable'] - - result = dict( - changed=False, - dest=dest, - image=image, - ) - - # We want to know if the user provided it or not, so we set default here - if executable is None: - executable = '7z' - - binary = module.get_bin_path(executable, None) - - # When executable was provided and binary not found, warn user ! - if module.params['executable'] is not None and not binary: - module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable) - - if not os.path.exists(dest): - module.fail_json(msg="Directory '%s' does not exist" % dest) - - if not os.path.exists(os.path.dirname(image)): - module.fail_json(msg="ISO image '%s' does not exist" % image) - - result['files'] = [] - extract_files = list(files) - - if not force: - # Check if we have to process any files based on existence - for f in files: - dest_file = os.path.join(dest, os.path.basename(f)) - if os.path.exists(dest_file): - result['files'].append(dict( - checksum=None, - dest=dest_file, - src=f, - )) - extract_files.remove(f) - - if not extract_files: - module.exit_json(**result) - - tmp_dir = tempfile.mkdtemp() - - # Use 7zip when we have a binary, otherwise try to mount - if binary: - cmd = [binary, 'x', image, '-o%s' % tmp_dir] + extract_files - else: - cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir] - - rc, out, err = module.run_command(cmd) - if rc != 0: - result.update(dict( - cmd=cmd, - rc=rc, - stderr=err, - stdout=out, - )) - shutil.rmtree(tmp_dir) - - if binary: - module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result) - else: - module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." 
% (image, tmp_dir, executable), **result) - - try: - for f in extract_files: - tmp_src = os.path.join(tmp_dir, f) - if not os.path.exists(tmp_src): - module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result) - - src_checksum = module.sha1(tmp_src) - - dest_file = os.path.join(dest, os.path.basename(f)) - - if os.path.exists(dest_file): - dest_checksum = module.sha1(dest_file) - else: - dest_checksum = None - - result['files'].append(dict( - checksum=src_checksum, - dest=dest_file, - src=f, - )) - - if src_checksum != dest_checksum: - if not module.check_mode: - shutil.copy(tmp_src, dest_file) - - result['changed'] = True - finally: - if not binary: - module.run_command([module.get_bin_path('umount'), tmp_dir]) - - shutil.rmtree(tmp_dir) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/files/read_csv.py b/ansible_collections/community/general/plugins/modules/files/read_csv.py deleted file mode 100644 index 2d5644db..00000000 --- a/ansible_collections/community/general/plugins/modules/files/read_csv.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: read_csv -short_description: Read a CSV file -description: -- Read a CSV file and return a list or a dictionary, containing one dictionary per row. -author: -- Dag Wieers (@dagwieers) -options: - path: - description: - - The CSV filename to read data from. - type: path - required: yes - aliases: [ filename ] - key: - description: - - The column name used as a key for the resulting dictionary. - - If C(key) is unset, the module returns a list of dictionaries, - where each dictionary is a row in the CSV file. - type: str - dialect: - description: - - The CSV dialect to use when parsing the CSV file. - - Possible values include C(excel), C(excel-tab) or C(unix). - type: str - default: excel - fieldnames: - description: - - A list of field names for every column. - - This is needed if the CSV does not have a header. - type: list - elements: str - unique: - description: - - Whether the C(key) used is expected to be unique. - type: bool - default: yes - delimiter: - description: - - A one-character string used to separate fields. - - When using this parameter, you change the default value used by C(dialect). - - The default value depends on the dialect used. - type: str - skipinitialspace: - description: - - Whether to ignore any whitespaces immediately following the delimiter. - - When using this parameter, you change the default value used by C(dialect). - - The default value depends on the dialect used. - type: bool - strict: - description: - - Whether to raise an exception on bad CSV input. - - When using this parameter, you change the default value used by C(dialect). - - The default value depends on the dialect used. - type: bool -notes: -- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja. 
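For intuition, the list/dict duality this documentation describes maps directly onto Python's csv.DictReader; a minimal self-contained sketch (sample data assumed, not the module's code):

import csv
import io

data = "name,uid,gid\ndag,500,500\njeroen,501,500\n"
reader = csv.DictReader(io.StringIO(data), dialect='excel')
rows = list(reader)                           # the "list" result: one dict per row
by_name = {row['name']: row for row in rows}  # the "dict" result with key=name
assert by_name['dag']['uid'] == '500'         # plain csv yields strings, not ints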
-''' - -EXAMPLES = r''' -# Example CSV file with header -# -# name,uid,gid -# dag,500,500 -# jeroen,501,500 - -# Read a CSV file and access user 'dag' -- name: Read users from CSV file and return a dictionary - community.general.read_csv: - path: users.csv - key: name - register: users - delegate_to: localhost - -- ansible.builtin.debug: - msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}' - -# Read a CSV file and access the first item -- name: Read users from CSV file and return a list - community.general.read_csv: - path: users.csv - register: users - delegate_to: localhost - -- ansible.builtin.debug: - msg: 'User {{ users.list.1.name }} has UID {{ users.list.1.uid }} and GID {{ users.list.1.gid }}' - -# Example CSV file without header and semi-colon delimiter -# -# dag;500;500 -# jeroen;501;500 - -# Read a CSV file without headers -- name: Read users from CSV file and return a list - community.general.read_csv: - path: users.csv - fieldnames: name,uid,gid - delimiter: ';' - register: users - delegate_to: localhost -''' - -RETURN = r''' -dict: - description: The CSV content as a dictionary. - returned: success - type: dict - sample: - dag: - name: dag - uid: 500 - gid: 500 - jeroen: - name: jeroen - uid: 501 - gid: 500 -list: - description: The CSV content as a list. - returned: success - type: list - sample: - - name: dag - uid: 500 - gid: 500 - - name: jeroen - uid: 501 - gid: 500 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, - DialectNotAvailableError, - CustomDialectFailureError) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True, aliases=['filename']), - dialect=dict(type='str', default='excel'), - key=dict(type='str', no_log=False), - fieldnames=dict(type='list', elements='str'), - unique=dict(type='bool', default=True), - delimiter=dict(type='str'), - skipinitialspace=dict(type='bool'), - strict=dict(type='bool'), - ), - supports_check_mode=True, - ) - - path = module.params['path'] - dialect = module.params['dialect'] - key = module.params['key'] - fieldnames = module.params['fieldnames'] - unique = module.params['unique'] - - dialect_params = { - "delimiter": module.params['delimiter'], - "skipinitialspace": module.params['skipinitialspace'], - "strict": module.params['strict'], - } - - try: - dialect = initialize_dialect(dialect, **dialect_params) - except (CustomDialectFailureError, DialectNotAvailableError) as e: - module.fail_json(msg=to_native(e)) - - try: - with open(path, 'rb') as f: - data = f.read() - except (IOError, OSError) as e: - module.fail_json(msg="Unable to open file: %s" % to_native(e)) - - reader = read_csv(data, dialect, fieldnames) - - if key and key not in reader.fieldnames: - module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames))) - - data_dict = dict() - data_list = list() - - if key is None: - try: - for row in reader: - data_list.append(row) - except CSVError as e: - module.fail_json(msg="Unable to process file: %s" % to_native(e)) - else: - try: - for row in reader: - if unique and row[key] in data_dict: - module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key])) - data_dict[row[key]] = row - except CSVError as e: - module.fail_json(msg="Unable to process file: %s" % 
to_native(e)) - - module.exit_json(dict=data_dict, list=data_list) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/files/sapcar_extract.py b/ansible_collections/community/general/plugins/modules/files/sapcar_extract.py deleted file mode 100644 index 8463703c..00000000 --- a/ansible_collections/community/general/plugins/modules/files/sapcar_extract.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Rainer Leber -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sapcar_extract -short_description: Manages SAP SAPCAR archives -version_added: "3.2.0" -description: - - Provides support for unpacking C(sar)/C(car) files with the SAPCAR binary from SAP and pulling - information back into Ansible. -options: - path: - description: The path to the SAR/CAR file. - type: path - required: true - dest: - description: - - The destination where SAPCAR extracts the SAR file. Missing folders will be created. - If this parameter is not provided it will unpack in the same folder as the SAR file. - type: path - binary_path: - description: - - The path to the SAPCAR binary, for example, C(/home/dummy/sapcar) or C(https://myserver/SAPCAR). - If this parameter is not provided the module will look in C(PATH). - type: path - signature: - description: - - If C(true) the signature will be extracted. - default: false - type: bool - security_library: - description: - - The path to the security library, for example, C(/usr/sap/hostctrl/exe/libsapcrytp.so), for signature operations. - type: path - manifest: - description: - - The name of the manifest. - default: "SIGNATURE.SMF" - type: str - remove: - description: - - If C(true) the SAR/CAR file will be removed. B(This should be used with caution!) - default: false - type: bool -author: - - Rainer Leber (@RainerLeber) -notes: - - Always returns C(changed=true) in C(check_mode). 
-''' - -EXAMPLES = """ -- name: Extract SAR file - community.general.sapcar_extract: - path: "~/source/hana.sar" - -- name: Extract SAR file with destination - community.general.sapcar_extract: - path: "~/source/hana.sar" - dest: "~/test/" - -- name: Extract SAR file with destination, downloading SAPCAR from a webserver (can be a fileshare as well) - community.general.sapcar_extract: - path: "~/source/hana.sar" - dest: "~/dest/" - binary_path: "https://myserver/SAPCAR" - -- name: Extract SAR file and delete SAR after extract - community.general.sapcar_extract: - path: "~/source/hana.sar" - remove: true - -- name: Extract SAR file with manifest - community.general.sapcar_extract: - path: "~/source/hana.sar" - signature: true - -- name: Extract SAR file with manifest and rename it - community.general.sapcar_extract: - path: "~/source/hana.sar" - manifest: "MyNewSignature.SMF" - signature: true -""" - -import os -from tempfile import NamedTemporaryFile -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import open_url -from ansible.module_utils.common.text.converters import to_native - - -def get_list_of_files(dir_name): - # create a list of file and directory names - # in the given directory - list_of_file = os.listdir(dir_name) - allFiles = list() - # Iterate over all the entries - for entry in list_of_file: - # Create full path - fullPath = os.path.join(dir_name, entry) - # If entry is a directory then get the list of files in this directory - if os.path.isdir(fullPath): - allFiles = allFiles + [fullPath] - allFiles = allFiles + get_list_of_files(fullPath) - else: - allFiles.append(fullPath) - return allFiles - - -def download_SAPCAR(binary_path, module): - bin_path = None - # download the SAPCAR binary if a URL is provided, otherwise return the given path - if binary_path is not None: - if binary_path.startswith('https://') or binary_path.startswith('http://'): - random_file = NamedTemporaryFile(delete=False) - with open_url(binary_path) as response: - with random_file as out_file: - data = response.read() - out_file.write(data) - os.chmod(out_file.name, 0o700) - bin_path = out_file.name - module.add_cleanup_file(bin_path) - else: - bin_path = binary_path - return bin_path - - -def check_if_present(command, path, dest, signature, manifest, module): - # parse the SAPCAR listing of the SAR file to compare with already extracted files - iter_command = [command, '-tvf', path] - sar_out = module.run_command(iter_command)[1] - sar_raw = sar_out.split("\n")[1:] - if dest[-1] != "/": - dest = dest + "/" - sar_files = [dest + x.split(" ")[-1] for x in sar_raw if x] - # remove any SIGNATURE.SMF from the list because it will not be unpacked if signature is false - if not signature: - sar_files = [item for item in sar_files if '.SMF' not in item] - # if the signature is renamed, adjust the expected file list for the comparison (worked example below).
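# Worked example (listing format assumed for illustration, not verbatim SAPCAR
# output): 'SAPCAR -tvf hana.sar' prints a header line followed by one line per
# member whose last whitespace-separated token is the member name, for example
#   '-rw-r--r-- 1024 some/payload.txt'
# With dest='/tmp/out/' the expected path list becomes
# ['/tmp/out/some/payload.txt', ...], and the archive counts as present only
# when every expected path already exists below dest.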
- if manifest != "SIGNATURE.SMF": - sar_files = [item for item in sar_files if '.SMF' not in item] - sar_files = sar_files + [manifest] - # get extracted files if present - files_extracted = get_list_of_files(dest) - # compare extracted files with files in sar file - present = all(elem in files_extracted for elem in sar_files) - return present - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True), - dest=dict(type='path'), - binary_path=dict(type='path'), - signature=dict(type='bool', default=False), - security_library=dict(type='path'), - manifest=dict(type='str', default="SIGNATURE.SMF"), - remove=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - rc, out, err = [0, "", ""] - params = module.params - check_mode = module.check_mode - - path = params['path'] - dest = params['dest'] - signature = params['signature'] - security_library = params['security_library'] - manifest = params['manifest'] - remove = params['remove'] - - bin_path = download_SAPCAR(params['binary_path'], module) - - if dest is None: - dest_head_tail = os.path.split(path) - dest = dest_head_tail[0] + '/' - else: - if not os.path.exists(dest): - os.makedirs(dest, 0o755) - - if bin_path is not None: - command = [module.get_bin_path(bin_path, required=True)] - else: - try: - command = [module.get_bin_path('sapcar', required=True)] - except Exception as e: - module.fail_json(msg='Failed to find SAPCAR at the expected path or URL "{0}". Please check whether it is available: {1}' - .format(bin_path, to_native(e))) - - present = check_if_present(command[0], path, dest, signature, manifest, module) - - if not present: - command.extend(['-xvf', path, '-R', dest]) - if security_library: - command.extend(['-L', security_library]) - if signature: - command.extend(['-manifest', manifest]) - if not check_mode: - (rc, out, err) = module.run_command(command, check_rc=True) - changed = True - else: - changed = False - out = "already unpacked" - - if remove: - os.remove(path) - - module.exit_json(changed=changed, message=rc, stdout=out, - stderr=err, command=' '.join(command)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/files/xattr.py b/ansible_collections/community/general/plugins/modules/files/xattr.py deleted file mode 100644 index c0867892..00000000 --- a/ansible_collections/community/general/plugins/modules/files/xattr.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: xattr -short_description: Manage user-defined extended attributes -description: - - Manages filesystem user-defined extended attributes. - - Requires that extended attributes are enabled on the target filesystem - and that the setfattr/getfattr utilities are present. -options: - path: - description: - - The full path of the file/object to get the facts of. - - Before 2.3 this option was only usable as I(name). - type: path - required: true - aliases: [ name ] - namespace: - description: - - Namespace of the named name/key. - type: str - default: user - key: - description: - - The name of a specific Extended attribute key to set/retrieve.
- type: str - value: - description: - - The value to set the named name/key to, it automatically sets the I(state) to C(present). - type: str - state: - description: - - defines which state you want to do. - C(read) retrieves the current value for a I(key) (default) - C(present) sets I(path) to C(value), default if value is set - C(all) dumps all data - C(keys) retrieves all keys - C(absent) deletes the key - type: str - choices: [ absent, all, keys, present, read ] - default: read - follow: - description: - - If C(true), dereferences symlinks and sets/gets attributes on symlink target, - otherwise acts on symlink itself. - type: bool - default: true -notes: - - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well. -author: - - Brian Coca (@bcoca) -''' - -EXAMPLES = ''' -- name: Obtain the extended attributes of /etc/foo.conf - community.general.xattr: - path: /etc/foo.conf - -- name: Set the key 'user.foo' to value 'bar' - community.general.xattr: - path: /etc/foo.conf - key: foo - value: bar - -- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914' - community.general.xattr: - path: /mnt/bricks/brick1 - namespace: trusted - key: glusterfs.volume-id - value: "0x817b94343f164f199e5b573b4ea1f914" - -- name: Remove the key 'user.foo' - community.general.xattr: - path: /etc/foo.conf - key: foo - state: absent - -- name: Remove the key 'trusted.glusterfs.volume-id' - community.general.xattr: - path: /mnt/bricks/brick1 - namespace: trusted - key: glusterfs.volume-id - state: absent -''' - -import os - -# import module snippets -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def get_xattr_keys(module, path, follow): - cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] - - if not follow: - cmd.append('-h') - cmd.append(path) - - return _run_xattr(module, cmd) - - -def get_xattr(module, path, key, follow): - cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] - - if not follow: - cmd.append('-h') - if key is None: - cmd.append('-d') - else: - cmd.append('-n') - cmd.append(key) - cmd.append(path) - - return _run_xattr(module, cmd, False) - - -def set_xattr(module, path, key, value, follow): - - cmd = [module.get_bin_path('setfattr', True)] - if not follow: - cmd.append('-h') - cmd.append('-n') - cmd.append(key) - cmd.append('-v') - cmd.append(value) - cmd.append(path) - - return _run_xattr(module, cmd) - - -def rm_xattr(module, path, key, follow): - - cmd = [module.get_bin_path('setfattr', True)] - if not follow: - cmd.append('-h') - cmd.append('-x') - cmd.append(key) - cmd.append(path) - - return _run_xattr(module, cmd, False) - - -def _run_xattr(module, cmd, check_rc=True): - - try: - (rc, out, err) = module.run_command(cmd, check_rc=check_rc) - except Exception as e: - module.fail_json(msg="%s!" 
% to_native(e)) - - # result = {'raw': out} - result = {} - for line in out.splitlines(): - if line.startswith('#') or line == '': - pass - elif '=' in line: - (key, val) = line.split('=', 1) - result[key] = val.strip('"') - else: - result[line] = '' - return result - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True, aliases=['name']), - namespace=dict(type='str', default='user'), - key=dict(type='str', no_log=False), - value=dict(type='str'), - state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']), - follow=dict(type='bool', default=True), - ), - supports_check_mode=True, - ) - path = module.params.get('path') - namespace = module.params.get('namespace') - key = module.params.get('key') - value = module.params.get('value') - state = module.params.get('state') - follow = module.params.get('follow') - - if not os.path.exists(path): - module.fail_json(msg="path not found or not accessible!") - - changed = False - msg = "" - res = {} - - if key is None and state in ['absent', 'present']: - module.fail_json(msg="%s needs a key parameter" % state) - - # Prepend the key with the namespace if defined - if ( - key is not None and - namespace is not None and - len(namespace) > 0 and - not (namespace == 'user' and key.startswith('user.'))): - key = '%s.%s' % (namespace, key) - - if (state == 'present' or value is not None): - current = get_xattr(module, path, key, follow) - if current is None or key not in current or value != current[key]: - if not module.check_mode: - res = set_xattr(module, path, key, value, follow) - changed = True - res = current - msg = "%s set to %s" % (key, value) - elif state == 'absent': - current = get_xattr(module, path, key, follow) - if current is not None and key in current: - if not module.check_mode: - res = rm_xattr(module, path, key, follow) - changed = True - res = current - msg = "%s removed" % (key) - elif state == 'keys': - res = get_xattr_keys(module, path, follow) - msg = "returning all keys" - elif state == 'all': - res = get_xattr(module, path, None, follow) - msg = "dumping all" - else: - res = get_xattr(module, path, key, follow) - msg = "returning %s" % key - - module.exit_json(changed=changed, msg=msg, xattr=res) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/files/xml.py b/ansible_collections/community/general/plugins/modules/files/xml.py deleted file mode 100644 index ae95e9c6..00000000 --- a/ansible_collections/community/general/plugins/modules/files/xml.py +++ /dev/null @@ -1,987 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Red Hat, Inc. -# Copyright: (c) 2014, Tim Bielawa -# Copyright: (c) 2014, Magnus Hedemark -# Copyright: (c) 2017, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xml -short_description: Manage bits and pieces of XML files or strings -description: -- A CRUD-like interface to managing bits of XML files. -options: - path: - description: - - Path to the file to operate on. - - This file must exist ahead of time. - - This parameter is required, unless C(xmlstring) is given. - type: path - aliases: [ dest, file ] - xmlstring: - description: - - A string containing XML on which to operate. - - This parameter is required, unless C(path) is given. 
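(Aside: the `_run_xattr` helper in the xattr module deleted above reduces `getfattr`/`setfattr` output to a flat dict, skipping the `# file:` header and blank lines and stripping the quotes around values. A standalone sketch of just that parsing step, fed a made-up listing; `parse_xattr_output` is a hypothetical name for illustration:

def parse_xattr_output(out):
    result = {}
    for line in out.splitlines():
        if line.startswith('#') or line == '':
            continue  # skip the '# file: <path>' header and blank lines
        if '=' in line:
            key, val = line.split('=', 1)
            result[key] = val.strip('"')
        else:
            result[line] = ''  # bare keys, e.g. from a key-only listing
    return result


# made-up 'getfattr -d' output for illustration only
sample = '# file: /etc/foo.conf\nuser.foo="bar"\nuser.flag\n'
print(parse_xattr_output(sample))  # -> {'user.foo': 'bar', 'user.flag': ''}
)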
-    type: str
-  xpath:
-    description:
-    - A valid XPath expression describing the item(s) you want to manipulate.
-    - Operates on the document root, C(/), by default.
-    type: str
-  namespaces:
-    description:
-    - The namespace C(prefix:uri) mapping for the XPath expression.
-    - Needs to be a C(dict), not a C(list) of items.
-    type: dict
-  state:
-    description:
-    - Set or remove an xpath selection (node(s), attribute(s)).
-    type: str
-    choices: [ absent, present ]
-    default: present
-    aliases: [ ensure ]
-  attribute:
-    description:
-    - The attribute to select when using parameter C(value).
-    - This is a string, not prepended with C(@).
-    type: raw
-  value:
-    description:
-    - Desired state of the selected attribute.
-    - Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)).
-    - Elements default to no value (but present).
-    - Attributes default to an empty string.
-    type: raw
-  add_children:
-    description:
-    - Add additional child-element(s) to a selected element for a given C(xpath).
-    - Child elements must be given in a list and each item may be either a string
-      (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
-      or a hash where the key is an element name and the value is the element value.
-    - This parameter requires C(xpath) to be set.
-    type: list
-    elements: raw
-  set_children:
-    description:
-    - Set the child-element(s) of a selected element for a given C(xpath).
-    - Removes any existing children.
-    - Child elements must be specified as in C(add_children).
-    - This parameter requires C(xpath) to be set.
-    type: list
-    elements: raw
-  count:
-    description:
-    - Search for a given C(xpath) and provide the count of any matches.
-    - This parameter requires C(xpath) to be set.
-    type: bool
-    default: no
-  print_match:
-    description:
-    - Search for a given C(xpath) and print out any matches.
-    - This parameter requires C(xpath) to be set.
-    type: bool
-    default: no
-  pretty_print:
-    description:
-    - Pretty print XML output.
-    type: bool
-    default: no
-  content:
-    description:
-    - Search for a given C(xpath) and get content.
-    - This parameter requires C(xpath) to be set.
-    type: str
-    choices: [ attribute, text ]
-  input_type:
-    description:
-    - Type of input for C(add_children) and C(set_children).
-    type: str
-    choices: [ xml, yaml ]
-    default: yaml
-  backup:
-    description:
-    - Create a backup file including the timestamp information so you can get
-      the original file back if you somehow clobbered it incorrectly.
-    type: bool
-    default: no
-  strip_cdata_tags:
-    description:
-    - Remove CDATA tags surrounding text values.
-    - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
-    type: bool
-    default: no
-  insertbefore:
-    description:
-    - Add additional child-element(s) before the first selected element for a given C(xpath).
-    - Child elements must be given in a list and each item may be either a string
-      (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
-      or a hash where the key is an element name and the value is the element value.
-    - This parameter requires C(xpath) to be set.
-    type: bool
-    default: no
-  insertafter:
-    description:
-    - Add additional child-element(s) after the last selected element for a given C(xpath).
-    - Child elements must be given in a list and each item may be either a string
-      (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
-      or a hash where the key is an element name and the value is the element value.
-    - This parameter requires C(xpath) to be set.
-    type: bool
-    default: no
-requirements:
-- lxml >= 2.3.0
-notes:
-- Use the C(--check) and C(--diff) options when testing your expressions.
-- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
-- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
-- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples.
-- Namespaces prefix should be used for all children of an element where namespace is defined, unless another namespace is defined for them.
-seealso:
-- name: Xml module development community wiki
-  description: More information related to the development of this xml module.
-  link: https://github.com/ansible/community/wiki/Module:-xml
-- name: Introduction to XPath
-  description: A brief tutorial on XPath (w3schools.com).
-  link: https://www.w3schools.com/xml/xpath_intro.asp
-- name: XPath Reference document
-  description: The reference documentation on XSLT/XPath (developer.mozilla.org).
-  link: https://developer.mozilla.org/en-US/docs/Web/XPath
-author:
-- Tim Bielawa (@tbielawa)
-- Magnus Hedemark (@magnus919)
-- Dag Wieers (@dagwieers)
-'''
-
-EXAMPLES = r'''
-# Consider the following XML file:
-#
-# <business type="bar">
-#   <name>Tasty Beverage Co.</name>
-#   <beers>
-#     <beer>Rochefort 10</beer>
-#     <beer>St. Bernardus Abbot 12</beer>
-#     <beer>Schlitz</beer>
-#   </beers>
-#   <rating subjective="true">10</rating>
-#   <website>
-#     <mobilefriendly/>
-#     <address>http://tastybeverageco.com</address>
-#   </website>
-# </business>
-
-- name: Remove the 'subjective' attribute of the 'rating' element
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/rating/@subjective
-    state: absent
-
-- name: Set the rating to '11'
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/rating
-    value: 11
-
-# Retrieve and display the number of nodes
-- name: Get count of 'beers' nodes
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/beers/beer
-    count: yes
-  register: hits
-
-- ansible.builtin.debug:
-    var: hits.count
-
-# Example where parent XML nodes are created automatically
-- name: Add a 'phonenumber' element to the 'business' element
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/phonenumber
-    value: 555-555-1234
-
-- name: Add several more beers to the 'beers' element
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/beers
-    add_children:
-    - beer: Old Rasputin
-    - beer: Old Motor Oil
-    - beer: Old Curmudgeon
-
-- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: '/business/beers/beer[text()="Rochefort 10"]'
-    insertbefore: yes
-    add_children:
-    - beer: Old Rasputin
-    - beer: Old Motor Oil
-    - beer: Old Curmudgeon
-
-# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
-- name: Add a 'validxhtml' element to the 'website' element
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/website/validxhtml
-
-- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/website/validxhtml/@validatedon
-
-- name: Add or modify an attribute, add element if needed
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/website/validxhtml
-    attribute: validatedon
-    value: 1976-08-05
-
-# How to read an attribute value and access it in Ansible
-- name: Read an element's attribute values
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/website/validxhtml
-    content: attribute
-  register: xmlresp
-
-- name: Show an attribute value
-  ansible.builtin.debug:
-    var: xmlresp.matches[0].validxhtml.validatedon
-
-- name: Remove all children from the 'website' element (option 1)
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/website/*
-    state: absent
-
-- name: Remove all children from the 'website' element (option 2)
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business/website
-    children: []
-
-# In case of namespaces, like in below XML, they have to be explicitly stated.
-#
-# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
-#   <bar>
-#     <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
-#   </bar>
-# </foo>
-
-# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
-- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
-  community.general.xml:
-    path: foo.xml
-    xpath: /x:foo/x:bar/y:baz
-    namespaces:
-      x: http://x.test
-      y: http://y.test
-      z: http://z.test
-    attribute: z:my_namespaced_attribute
-    value: 'false'
-
-- name: Adding building nodes with floor subnodes from a YAML variable
-  community.general.xml:
-    path: /foo/bar.xml
-    xpath: /business
-    add_children:
-    - building:
-        # Attributes
-        name: Scumm bar
-        location: Monkey island
-        # Subnodes
-        _:
-        - floor: Pirate hall
-        - floor: Grog storage
-        - construction_date: "1990"  # Only strings are valid
-    - building: Grog factory
-
-# Consider this XML for following example -
-#
-# <config>
-#   <element name="test1">
-#     <text>part to remove</text>
-#   </element>
-#   <element name="test2">
-#     <text>part to keep</text>
-#   </element>
-# </config>
-
-- name: Delete element node based upon attribute
-  community.general.xml:
-    path: bar.xml
-    xpath: /config/element[@name='test1']
-    state: absent
-'''
-
-RETURN = r'''
-actions:
-  description: A dictionary with the original xpath, namespaces and state.
-  type: dict
-  returned: success
-  sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
-backup_file:
-  description: The name of the backup file that was created
-  type: str
-  returned: when backup=yes
-  sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
-count:
-  description: The count of xpath matches.
-  type: int
-  returned: when parameter 'count' is set
-  sample: 2
-matches:
-  description: The xpath matches found.
-  type: list
-  returned: when parameter 'print_match' is set
-msg:
-  description: A message related to the performed action(s).
-  type: str
-  returned: always
-xmlstring:
-  description: An XML string of the resulting output.
-  type: str
-  returned: when parameter 'xmlstring' is set
-'''
-
-import copy
-import json
-import os
-import re
-import traceback
-
-from io import BytesIO
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-LXML_IMP_ERR = None
-try:
-    from lxml import etree, objectify
-    HAS_LXML = True
-except ImportError:
-    LXML_IMP_ERR = traceback.format_exc()
-    HAS_LXML = False
-
-from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
-from ansible.module_utils.six import iteritems, string_types
-from ansible.module_utils.common.text.converters import to_bytes, to_native
-from ansible.module_utils.common._collections_compat import MutableMapping
-
-_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
-_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
-# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
-# strings wrapped by the other delimiter' XPath trick, especially as simple XPath.
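# (Aside: the trick referred to above is XPath's concat() workaround, e.g.
#   text()=concat('say "hi"', "'", 's')
# to express a value containing both quote characters; _XPSTR below accepts
# exactly one single- or double-quoted string, so the simple-XPath splitters
# cannot represent such values.)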
-_XPSTR = "('(?:.*)'|\"(?:.*)\")" - -_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$") -_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$") -_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$") -_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$") -_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$") -_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$") - - -def has_changed(doc): - orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc))) - obj = etree.tostring(objectify.fromstring(etree.tostring(doc))) - return (orig_obj != obj) - - -def do_print_match(module, tree, xpath, namespaces): - match = tree.xpath(xpath, namespaces=namespaces) - match_xpaths = [] - for m in match: - match_xpaths.append(tree.getpath(m)) - match_str = json.dumps(match_xpaths) - msg = "selector '%s' match: %s" % (xpath, match_str) - finish(module, tree, xpath, namespaces, changed=False, msg=msg) - - -def count_nodes(module, tree, xpath, namespaces): - """ Return the count of nodes matching the xpath """ - hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces) - msg = "found %d nodes" % hits - finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits)) - - -def is_node(tree, xpath, namespaces): - """ Test if a given xpath matches anything and if that match is a node. - - For now we just assume you're only searching for one specific thing.""" - if xpath_matches(tree, xpath, namespaces): - # OK, it found something - match = tree.xpath(xpath, namespaces=namespaces) - if isinstance(match[0], etree._Element): - return True - - return False - - -def is_attribute(tree, xpath, namespaces): - """ Test if a given xpath matches and that match is an attribute - - An xpath attribute search will only match one item""" - if xpath_matches(tree, xpath, namespaces): - match = tree.xpath(xpath, namespaces=namespaces) - if isinstance(match[0], etree._ElementStringResult): - return True - elif isinstance(match[0], etree._ElementUnicodeResult): - return True - return False - - -def xpath_matches(tree, xpath, namespaces): - """ Test if a node exists """ - if tree.xpath(xpath, namespaces=namespaces): - return True - return False - - -def delete_xpath_target(module, tree, xpath, namespaces): - """ Delete an attribute or element from a tree """ - changed = False - try: - for result in tree.xpath(xpath, namespaces=namespaces): - changed = True - # Get the xpath for this result - if is_attribute(tree, xpath, namespaces): - # Delete an attribute - parent = result.getparent() - # Pop this attribute match out of the parent - # node's 'attrib' dict by using this match's - # 'attrname' attribute for the key - parent.attrib.pop(result.attrname) - elif is_node(tree, xpath, namespaces): - # Delete an element - result.getparent().remove(result) - else: - raise Exception("Impossible error") - except Exception as e: - module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e)) - else: - finish(module, tree, xpath, namespaces, changed=changed) - - -def replace_children_of(children, match): - for element in list(match): - match.remove(element) - match.extend(children) - - -def set_target_children_inner(module, tree, xpath, namespaces, children, in_type): - matches = tree.xpath(xpath, namespaces=namespaces) - - # Create a list of our new children - children = children_to_nodes(module, children, in_type) - children_as_string = [etree.tostring(c) for c in 
children] - - changed = False - - # xpaths always return matches as a list, so.... - for match in matches: - # Check if elements differ - if len(list(match)) == len(children): - for idx, element in enumerate(list(match)): - if etree.tostring(element) != children_as_string[idx]: - replace_children_of(children, match) - changed = True - break - else: - replace_children_of(children, match) - changed = True - - return changed - - -def set_target_children(module, tree, xpath, namespaces, children, in_type): - changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type) - # Write it out - finish(module, tree, xpath, namespaces, changed=changed) - - -def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter): - if is_node(tree, xpath, namespaces): - new_kids = children_to_nodes(module, children, in_type) - if insertbefore or insertafter: - insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter) - else: - for node in tree.xpath(xpath, namespaces=namespaces): - node.extend(new_kids) - finish(module, tree, xpath, namespaces, changed=True) - else: - finish(module, tree, xpath, namespaces) - - -def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter): - """ - Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the - first xpath hit, with insertafter, it is inserted after the last xpath hit. - """ - insert_target = tree.xpath(xpath, namespaces=namespaces) - loc_index = 0 if insertbefore else -1 - index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index]) - parent = insert_target[0].getparent() - if insertafter: - index_in_parent += 1 - for child in children: - parent.insert(index_in_parent, child) - index_in_parent += 1 - - -def _extract_xpstr(g): - return g[1:-1] - - -def split_xpath_last(xpath): - """split an XPath of the form /foo/bar/baz into /foo/bar and baz""" - xpath = xpath.strip() - m = _RE_SPLITSIMPLELAST.match(xpath) - if m: - # requesting an element to exist - return (m.group(1), [(m.group(2), None)]) - m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath) - if m: - # requesting an element to exist with an inner text - return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) - - m = _RE_SPLITSIMPLEATTRLAST.match(xpath) - if m: - # requesting an attribute to exist - return (m.group(1), [(m.group(2), None)]) - m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath) - if m: - # requesting an attribute to exist with a value - return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) - - m = _RE_SPLITSUBLAST.match(xpath) - if m: - content = [x.strip() for x in m.group(3).split(" and ")] - return (m.group(1), [('/' + m.group(2), content)]) - - m = _RE_SPLITONLYEQVALUE.match(xpath) - if m: - # requesting a change of inner text - return (m.group(1), [("", _extract_xpstr(m.group(2)))]) - return (xpath, []) - - -def nsnameToClark(name, namespaces): - if ":" in name: - (nsname, rawname) = name.split(":") - # return "{{%s}}%s" % (namespaces[nsname], rawname) - return "{{{0}}}{1}".format(namespaces[nsname], rawname) - - # no namespace name here - return name - - -def check_or_make_target(module, tree, xpath, namespaces): - (inner_xpath, changes) = split_xpath_last(xpath) - if (inner_xpath == xpath) or (changes is None): - module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! 
tree is %s" % - (xpath, etree.tostring(tree, pretty_print=True))) - return False - - changed = False - - if not is_node(tree, inner_xpath, namespaces): - changed = check_or_make_target(module, tree, inner_xpath, namespaces) - - # we test again after calling check_or_make_target - if is_node(tree, inner_xpath, namespaces) and changes: - for (eoa, eoa_value) in changes: - if eoa and eoa[0] != '@' and eoa[0] != '/': - # implicitly creating an element - new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml") - if eoa_value: - for nk in new_kids: - nk.text = eoa_value - - for node in tree.xpath(inner_xpath, namespaces=namespaces): - node.extend(new_kids) - changed = True - # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) - elif eoa and eoa[0] == '/': - element = eoa[1:] - new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml") - for node in tree.xpath(inner_xpath, namespaces=namespaces): - node.extend(new_kids) - for nk in new_kids: - for subexpr in eoa_value: - # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" % - # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True)) - check_or_make_target(module, nk, "./" + subexpr, namespaces) - changed = True - - # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) - elif eoa == "": - for node in tree.xpath(inner_xpath, namespaces=namespaces): - if (node.text != eoa_value): - node.text = eoa_value - changed = True - - elif eoa and eoa[0] == '@': - attribute = nsnameToClark(eoa[1:], namespaces) - - for element in tree.xpath(inner_xpath, namespaces=namespaces): - changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value) - - if changing: - changed = changed or changing - if eoa_value is None: - value = "" - else: - value = eoa_value - element.attrib[attribute] = value - - # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" % - # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True))) - - else: - module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True)) - - return changed - - -def ensure_xpath_exists(module, tree, xpath, namespaces): - changed = False - - if not is_node(tree, xpath, namespaces): - changed = check_or_make_target(module, tree, xpath, namespaces) - - finish(module, tree, xpath, namespaces, changed) - - -def set_target_inner(module, tree, xpath, namespaces, attribute, value): - changed = False - - try: - if not is_node(tree, xpath, namespaces): - changed = check_or_make_target(module, tree, xpath, namespaces) - except Exception as e: - missing_namespace = "" - # NOTE: This checks only the namespaces defined in root element! - # TODO: Implement a more robust check to check for child namespaces' existence - if tree.getroot().nsmap and ":" not in xpath: - missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n" - module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" % - (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc()) - - if not is_node(tree, xpath, namespaces): - module.fail_json(msg="Xpath %s does not reference a node! 
tree is %s" % - (xpath, etree.tostring(tree, pretty_print=True))) - - for element in tree.xpath(xpath, namespaces=namespaces): - if not attribute: - changed = changed or (element.text != value) - if element.text != value: - element.text = value - else: - changed = changed or (element.get(attribute) != value) - if ":" in attribute: - attr_ns, attr_name = attribute.split(":") - # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name) - attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name) - if element.get(attribute) != value: - element.set(attribute, value) - - return changed - - -def set_target(module, tree, xpath, namespaces, attribute, value): - changed = set_target_inner(module, tree, xpath, namespaces, attribute, value) - finish(module, tree, xpath, namespaces, changed) - - -def get_element_text(module, tree, xpath, namespaces): - if not is_node(tree, xpath, namespaces): - module.fail_json(msg="Xpath %s does not reference a node!" % xpath) - - elements = [] - for element in tree.xpath(xpath, namespaces=namespaces): - elements.append({element.tag: element.text}) - - finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) - - -def get_element_attr(module, tree, xpath, namespaces): - if not is_node(tree, xpath, namespaces): - module.fail_json(msg="Xpath %s does not reference a node!" % xpath) - - elements = [] - for element in tree.xpath(xpath, namespaces=namespaces): - child = {} - for key in element.keys(): - value = element.get(key) - child.update({key: value}) - elements.append({element.tag: child}) - - finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) - - -def child_to_element(module, child, in_type): - if in_type == 'xml': - infile = BytesIO(to_bytes(child, errors='surrogate_or_strict')) - - try: - parser = etree.XMLParser() - node = etree.parse(infile, parser) - return node.getroot() - except etree.XMLSyntaxError as e: - module.fail_json(msg="Error while parsing child element: %s" % e) - elif in_type == 'yaml': - if isinstance(child, string_types): - return etree.Element(child) - elif isinstance(child, MutableMapping): - if len(child) > 1: - module.fail_json(msg="Can only create children from hashes with one key") - - (key, value) = next(iteritems(child)) - if isinstance(value, MutableMapping): - children = value.pop('_', None) - - node = etree.Element(key, value) - - if children is not None: - if not isinstance(children, list): - module.fail_json(msg="Invalid children type: %s, must be list." % type(children)) - - subnodes = children_to_nodes(module, children) - node.extend(subnodes) - else: - node = etree.Element(key) - node.text = value - return node - else: - module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child)) - else: - module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." 
% in_type) - - -def children_to_nodes(module=None, children=None, type='yaml'): - """turn a str/hash/list of str&hash into a list of elements""" - children = [] if children is None else children - - return [child_to_element(module, child, type) for child in children] - - -def make_pretty(module, tree): - xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - result = dict( - changed=False, - ) - - if module.params['path']: - xml_file = module.params['path'] - with open(xml_file, 'rb') as xml_content: - if xml_string != xml_content.read(): - result['changed'] = True - if not module.check_mode: - if module.params['backup']: - result['backup_file'] = module.backup_local(module.params['path']) - tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - elif module.params['xmlstring']: - result['xmlstring'] = xml_string - # NOTE: Modifying a string is not considered a change ! - if xml_string != module.params['xmlstring']: - result['changed'] = True - - module.exit_json(**result) - - -def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()): - - result = dict( - actions=dict( - xpath=xpath, - namespaces=namespaces, - state=module.params['state'] - ), - changed=has_changed(tree), - ) - - if module.params['count'] or hitcount: - result['count'] = hitcount - - if module.params['print_match'] or matches: - result['matches'] = matches - - if msg: - result['msg'] = msg - - if result['changed']: - if module._diff: - result['diff'] = dict( - before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True), - after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True), - ) - - if module.params['path'] and not module.check_mode: - if module.params['backup']: - result['backup_file'] = module.backup_local(module.params['path']) - tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - if module.params['xmlstring']: - result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - module.exit_json(**result) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', aliases=['dest', 'file']), - xmlstring=dict(type='str'), - xpath=dict(type='str'), - namespaces=dict(type='dict', default={}), - state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']), - value=dict(type='raw'), - attribute=dict(type='raw'), - add_children=dict(type='list', elements='raw'), - set_children=dict(type='list', elements='raw'), - count=dict(type='bool', default=False), - print_match=dict(type='bool', default=False), - pretty_print=dict(type='bool', default=False), - content=dict(type='str', choices=['attribute', 'text']), - input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']), - backup=dict(type='bool', default=False), - strip_cdata_tags=dict(type='bool', default=False), - insertbefore=dict(type='bool', default=False), - insertafter=dict(type='bool', default=False), - ), - supports_check_mode=True, - required_by=dict( - add_children=['xpath'], - attribute=['value'], - content=['xpath'], - set_children=['xpath'], - value=['xpath'], - ), - required_if=[ - ['count', True, ['xpath']], - ['print_match', True, ['xpath']], - ['insertbefore', True, ['xpath']], - ['insertafter', True, ['xpath']], - ], - required_one_of=[ - 
['path', 'xmlstring'], - ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'], - ], - mutually_exclusive=[ - ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'], - ['path', 'xmlstring'], - ['insertbefore', 'insertafter'], - ], - ) - - xml_file = module.params['path'] - xml_string = module.params['xmlstring'] - xpath = module.params['xpath'] - namespaces = module.params['namespaces'] - state = module.params['state'] - value = json_dict_bytes_to_unicode(module.params['value']) - attribute = module.params['attribute'] - set_children = json_dict_bytes_to_unicode(module.params['set_children']) - add_children = json_dict_bytes_to_unicode(module.params['add_children']) - pretty_print = module.params['pretty_print'] - content = module.params['content'] - input_type = module.params['input_type'] - print_match = module.params['print_match'] - count = module.params['count'] - backup = module.params['backup'] - strip_cdata_tags = module.params['strip_cdata_tags'] - insertbefore = module.params['insertbefore'] - insertafter = module.params['insertafter'] - - # Check if we have lxml 2.3.0 or newer installed - if not HAS_LXML: - module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR) - elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'): - module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine') - elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'): - module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.') - - # Check if the file exists - if xml_string: - infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict')) - elif os.path.isfile(xml_file): - infile = open(xml_file, 'rb') - else: - module.fail_json(msg="The target XML source '%s' does not exist." % xml_file) - - # Parse and evaluate xpath expression - if xpath is not None: - try: - etree.XPath(xpath) - except etree.XPathSyntaxError as e: - module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e)) - except etree.XPathEvalError as e: - module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e)) - - # Try to parse in the target XML file - try: - parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags) - doc = etree.parse(infile, parser) - except etree.XMLSyntaxError as e: - module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e)) - - # Ensure we have the original copy to compare - global orig_doc - orig_doc = copy.deepcopy(doc) - - if print_match: - do_print_match(module, doc, xpath, namespaces) - - if count: - count_nodes(module, doc, xpath, namespaces) - - if content == 'attribute': - get_element_attr(module, doc, xpath, namespaces) - elif content == 'text': - get_element_text(module, doc, xpath, namespaces) - - # File exists: - if state == 'absent': - # - absent: delete xpath target - delete_xpath_target(module, doc, xpath, namespaces) - - # - present: carry on - - # children && value both set?: should have already aborted by now - # add_children && set_children both set?: should have already aborted by now - - # set_children set? - if set_children: - set_target_children(module, doc, xpath, namespaces, set_children, input_type) - - # add_children set? 
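# (Aside: each branch in this dispatch ends in finish() or module.exit_json(),
# so the first matching operation wins: print_match, count and content return
# immediately; state=absent deletes; set_children, add_children and value
# mutate; a bare xpath is ensured to exist; pretty_print only reformats;
# anything else falls through to the final fail_json.)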
- if add_children: - add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter) - - # No?: Carry on - - # Is the xpath target an attribute selector? - if value is not None: - set_target(module, doc, xpath, namespaces, attribute, value) - - # If an xpath was provided, we need to do something with the data - if xpath is not None: - ensure_xpath_exists(module, doc, xpath, namespaces) - - # Otherwise only reformat the xml data? - if pretty_print: - make_pretty(module, doc) - - module.fail_json(msg="Don't know what to do") - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/filesize.py b/ansible_collections/community/general/plugins/modules/filesize.py deleted file mode 120000 index fc4a211c..00000000 --- a/ansible_collections/community/general/plugins/modules/filesize.py +++ /dev/null @@ -1 +0,0 @@ -files/filesize.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/filesystem.py b/ansible_collections/community/general/plugins/modules/filesystem.py deleted file mode 120000 index eb9fd3c0..00000000 --- a/ansible_collections/community/general/plugins/modules/filesystem.py +++ /dev/null @@ -1 +0,0 @@ -system/filesystem.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/flatpak.py b/ansible_collections/community/general/plugins/modules/flatpak.py deleted file mode 120000 index 6d15cca1..00000000 --- a/ansible_collections/community/general/plugins/modules/flatpak.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/flatpak.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/flatpak_remote.py b/ansible_collections/community/general/plugins/modules/flatpak_remote.py deleted file mode 120000 index 0631b18e..00000000 --- a/ansible_collections/community/general/plugins/modules/flatpak_remote.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/flatpak_remote.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/flowdock.py b/ansible_collections/community/general/plugins/modules/flowdock.py deleted file mode 120000 index a308e608..00000000 --- a/ansible_collections/community/general/plugins/modules/flowdock.py +++ /dev/null @@ -1 +0,0 @@ -notification/flowdock.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gandi_livedns.py b/ansible_collections/community/general/plugins/modules/gandi_livedns.py deleted file mode 120000 index 6a8a82fa..00000000 --- a/ansible_collections/community/general/plugins/modules/gandi_livedns.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/gandi_livedns.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gconftool2.py b/ansible_collections/community/general/plugins/modules/gconftool2.py deleted file mode 120000 index 0576fa98..00000000 --- a/ansible_collections/community/general/plugins/modules/gconftool2.py +++ /dev/null @@ -1 +0,0 @@ -system/gconftool2.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gem.py b/ansible_collections/community/general/plugins/modules/gem.py deleted file mode 120000 index 77d323f6..00000000 --- a/ansible_collections/community/general/plugins/modules/gem.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/gem.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/git_config.py 
b/ansible_collections/community/general/plugins/modules/git_config.py deleted file mode 120000 index 35414a7c..00000000 --- a/ansible_collections/community/general/plugins/modules/git_config.py +++ /dev/null @@ -1 +0,0 @@ -source_control/git_config.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/github_deploy_key.py b/ansible_collections/community/general/plugins/modules/github_deploy_key.py deleted file mode 120000 index 8e55c13b..00000000 --- a/ansible_collections/community/general/plugins/modules/github_deploy_key.py +++ /dev/null @@ -1 +0,0 @@ -source_control/github/github_deploy_key.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/github_issue.py b/ansible_collections/community/general/plugins/modules/github_issue.py deleted file mode 120000 index 8c6005fa..00000000 --- a/ansible_collections/community/general/plugins/modules/github_issue.py +++ /dev/null @@ -1 +0,0 @@ -source_control/github/github_issue.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/github_key.py b/ansible_collections/community/general/plugins/modules/github_key.py deleted file mode 120000 index 131604db..00000000 --- a/ansible_collections/community/general/plugins/modules/github_key.py +++ /dev/null @@ -1 +0,0 @@ -source_control/github/github_key.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/github_release.py b/ansible_collections/community/general/plugins/modules/github_release.py deleted file mode 120000 index 63f9859c..00000000 --- a/ansible_collections/community/general/plugins/modules/github_release.py +++ /dev/null @@ -1 +0,0 @@ -source_control/github/github_release.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/github_repo.py b/ansible_collections/community/general/plugins/modules/github_repo.py deleted file mode 120000 index 4b8a1a3f..00000000 --- a/ansible_collections/community/general/plugins/modules/github_repo.py +++ /dev/null @@ -1 +0,0 @@ -source_control/github/github_repo.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/github_webhook.py b/ansible_collections/community/general/plugins/modules/github_webhook.py deleted file mode 120000 index 86cb5c4d..00000000 --- a/ansible_collections/community/general/plugins/modules/github_webhook.py +++ /dev/null @@ -1 +0,0 @@ -source_control/github/github_webhook.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/github_webhook_info.py b/ansible_collections/community/general/plugins/modules/github_webhook_info.py deleted file mode 120000 index f89f3c5a..00000000 --- a/ansible_collections/community/general/plugins/modules/github_webhook_info.py +++ /dev/null @@ -1 +0,0 @@ -source_control/github/github_webhook_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_branch.py b/ansible_collections/community/general/plugins/modules/gitlab_branch.py deleted file mode 120000 index 3049ddb6..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_branch.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_branch.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py b/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py deleted file mode 120000 index 
77f36b8b..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_deploy_key.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group.py b/ansible_collections/community/general/plugins/modules/gitlab_group.py deleted file mode 120000 index e07fe16e..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_group.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_group.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group_members.py b/ansible_collections/community/general/plugins/modules/gitlab_group_members.py deleted file mode 120000 index d6f647aa..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_group_members.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_group_members.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py deleted file mode 120000 index 8fdfaea5..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_group_variable.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_hook.py b/ansible_collections/community/general/plugins/modules/gitlab_hook.py deleted file mode 120000 index b1e4bf6d..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_hook.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_hook.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project.py b/ansible_collections/community/general/plugins/modules/gitlab_project.py deleted file mode 120000 index a11a9d2c..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_project.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_project.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_members.py b/ansible_collections/community/general/plugins/modules/gitlab_project_members.py deleted file mode 120000 index 2e1e69ac..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_project_members.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_project_members.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py deleted file mode 120000 index 619caff9..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_project_variable.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py b/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py deleted file mode 120000 index 7af5b500..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_protected_branch.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_runner.py 
b/ansible_collections/community/general/plugins/modules/gitlab_runner.py deleted file mode 120000 index 2483db10..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_runner.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_runner.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gitlab_user.py b/ansible_collections/community/general/plugins/modules/gitlab_user.py deleted file mode 120000 index d4f515a1..00000000 --- a/ansible_collections/community/general/plugins/modules/gitlab_user.py +++ /dev/null @@ -1 +0,0 @@ -source_control/gitlab/gitlab_user.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/grove.py b/ansible_collections/community/general/plugins/modules/grove.py deleted file mode 120000 index 601449e5..00000000 --- a/ansible_collections/community/general/plugins/modules/grove.py +++ /dev/null @@ -1 +0,0 @@ -notification/grove.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/gunicorn.py b/ansible_collections/community/general/plugins/modules/gunicorn.py deleted file mode 120000 index 0a21d24c..00000000 --- a/ansible_collections/community/general/plugins/modules/gunicorn.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/gunicorn.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hana_query.py b/ansible_collections/community/general/plugins/modules/hana_query.py deleted file mode 120000 index ab495c3b..00000000 --- a/ansible_collections/community/general/plugins/modules/hana_query.py +++ /dev/null @@ -1 +0,0 @@ -database/saphana/hana_query.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/haproxy.py b/ansible_collections/community/general/plugins/modules/haproxy.py deleted file mode 120000 index 5d619e68..00000000 --- a/ansible_collections/community/general/plugins/modules/haproxy.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/haproxy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/heroku_collaborator.py b/ansible_collections/community/general/plugins/modules/heroku_collaborator.py deleted file mode 120000 index ffd5540a..00000000 --- a/ansible_collections/community/general/plugins/modules/heroku_collaborator.py +++ /dev/null @@ -1 +0,0 @@ -cloud/heroku/heroku_collaborator.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hg.py b/ansible_collections/community/general/plugins/modules/hg.py deleted file mode 120000 index 4f26abc2..00000000 --- a/ansible_collections/community/general/plugins/modules/hg.py +++ /dev/null @@ -1 +0,0 @@ -source_control/hg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hipchat.py b/ansible_collections/community/general/plugins/modules/hipchat.py deleted file mode 120000 index cd4565a6..00000000 --- a/ansible_collections/community/general/plugins/modules/hipchat.py +++ /dev/null @@ -1 +0,0 @@ -notification/hipchat.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/homebrew.py b/ansible_collections/community/general/plugins/modules/homebrew.py deleted file mode 120000 index 0c20cdd4..00000000 --- a/ansible_collections/community/general/plugins/modules/homebrew.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/homebrew.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/homebrew_cask.py b/ansible_collections/community/general/plugins/modules/homebrew_cask.py deleted file mode 120000 index 3b9813a9..00000000 --- a/ansible_collections/community/general/plugins/modules/homebrew_cask.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/homebrew_cask.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/homebrew_tap.py b/ansible_collections/community/general/plugins/modules/homebrew_tap.py deleted file mode 120000 index c93395df..00000000 --- a/ansible_collections/community/general/plugins/modules/homebrew_tap.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/homebrew_tap.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/homectl.py b/ansible_collections/community/general/plugins/modules/homectl.py deleted file mode 120000 index 85c76631..00000000 --- a/ansible_collections/community/general/plugins/modules/homectl.py +++ /dev/null @@ -1 +0,0 @@ -system/homectl.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py b/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py deleted file mode 120000 index b63f6f14..00000000 --- a/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/honeybadger_deployment.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hpilo_boot.py b/ansible_collections/community/general/plugins/modules/hpilo_boot.py deleted file mode 120000 index 294e302d..00000000 --- a/ansible_collections/community/general/plugins/modules/hpilo_boot.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/hpilo/hpilo_boot.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hpilo_info.py b/ansible_collections/community/general/plugins/modules/hpilo_info.py deleted file mode 120000 index d32e66b9..00000000 --- a/ansible_collections/community/general/plugins/modules/hpilo_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/hpilo/hpilo_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hponcfg.py b/ansible_collections/community/general/plugins/modules/hponcfg.py deleted file mode 120000 index e12206a0..00000000 --- a/ansible_collections/community/general/plugins/modules/hponcfg.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/hpilo/hponcfg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/htpasswd.py b/ansible_collections/community/general/plugins/modules/htpasswd.py deleted file mode 120000 index 93f95426..00000000 --- a/ansible_collections/community/general/plugins/modules/htpasswd.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/htpasswd.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py b/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py deleted file mode 120000 index 60ab4fa1..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_ecs_instance.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py b/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py deleted file mode 120000 index ed78533d..00000000 --- 
a/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_evs_disk.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py b/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py deleted file mode 120000 index b91668bf..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_network_vpc.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py deleted file mode 120000 index 29cd721f..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_smn_topic.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py deleted file mode 120000 index 47dc4bb7..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_vpc_eip.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py deleted file mode 120000 index 4444615c..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_vpc_peering_connect.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py deleted file mode 120000 index c223dd27..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_vpc_port.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py deleted file mode 120000 index a5e512ef..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_vpc_private_ip.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py deleted file mode 120000 index e3c05664..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_vpc_route.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py deleted file mode 120000 index a6bbbec6..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_vpc_security_group.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py deleted file mode 120000 index cf725083..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py +++ 
/dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_vpc_security_group_rule.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py deleted file mode 120000 index 89507c6a..00000000 --- a/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py +++ /dev/null @@ -1 +0,0 @@ -cloud/huawei/hwc_vpc_subnet.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py b/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py deleted file mode 120000 index 2a42e31b..00000000 --- a/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py +++ /dev/null @@ -1 +0,0 @@ -storage/ibm/ibm_sa_domain.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_host.py b/ansible_collections/community/general/plugins/modules/ibm_sa_host.py deleted file mode 120000 index ed640a8d..00000000 --- a/ansible_collections/community/general/plugins/modules/ibm_sa_host.py +++ /dev/null @@ -1 +0,0 @@ -storage/ibm/ibm_sa_host.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py b/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py deleted file mode 120000 index be9d50e0..00000000 --- a/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py +++ /dev/null @@ -1 +0,0 @@ -storage/ibm/ibm_sa_host_ports.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py b/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py deleted file mode 120000 index 62188246..00000000 --- a/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py +++ /dev/null @@ -1 +0,0 @@ -storage/ibm/ibm_sa_pool.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py b/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py deleted file mode 120000 index f38933a9..00000000 --- a/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py +++ /dev/null @@ -1 +0,0 @@ -storage/ibm/ibm_sa_vol.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py b/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py deleted file mode 120000 index 70456484..00000000 --- a/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py +++ /dev/null @@ -1 +0,0 @@ -storage/ibm/ibm_sa_vol_map.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/icinga2_feature.py b/ansible_collections/community/general/plugins/modules/icinga2_feature.py deleted file mode 120000 index 80b3fa5e..00000000 --- a/ansible_collections/community/general/plugins/modules/icinga2_feature.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/icinga2_feature.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/icinga2_host.py b/ansible_collections/community/general/plugins/modules/icinga2_host.py deleted file mode 120000 index e42281bc..00000000 --- a/ansible_collections/community/general/plugins/modules/icinga2_host.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/icinga2_host.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py 
b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py deleted file mode 100644 index 2b41dfb0..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py +++ /dev/null @@ -1,362 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Fran Fitzpatrick -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_config -author: Fran Fitzpatrick (@fxfitz) -short_description: Manage Global FreeIPA Configuration Settings -description: -- Modify global configuration settings of a FreeIPA Server. -options: - ipaconfigstring: - description: Extra hashes to generate in password plug-in. - aliases: ["configstring"] - type: list - elements: str - choices: ["AllowNThash", "KDC:Disable Last Success", "KDC:Disable Lockout", "KDC:Disable Default Preauth for SPNs"] - version_added: '2.5.0' - ipadefaultloginshell: - description: Default shell for new users. - aliases: ["loginshell"] - type: str - ipadefaultemaildomain: - description: Default e-mail domain for new users. - aliases: ["emaildomain"] - type: str - ipadefaultprimarygroup: - description: Default group for new users. - aliases: ["primarygroup"] - type: str - version_added: '2.5.0' - ipagroupsearchfields: - description: A list of fields to search in when searching for groups. - aliases: ["groupsearchfields"] - type: list - elements: str - version_added: '2.5.0' - ipahomesrootdir: - description: Default location of home directories. - aliases: ["homesrootdir"] - type: str - version_added: '2.5.0' - ipakrbauthzdata: - description: Default types of PAC supported for services. - aliases: ["krbauthzdata"] - type: list - elements: str - choices: ["MS-PAC", "PAD", "nfs:NONE"] - version_added: '2.5.0' - ipamaxusernamelength: - description: Maximum length of usernames. - aliases: ["maxusernamelength"] - type: int - version_added: '2.5.0' - ipapwdexpadvnotify: - description: Notice of impending password expiration, in days. - aliases: ["pwdexpadvnotify"] - type: int - version_added: '2.5.0' - ipasearchrecordslimit: - description: Maximum number of records to search (-1 or 0 is unlimited). - aliases: ["searchrecordslimit"] - type: int - version_added: '2.5.0' - ipasearchtimelimit: - description: Maximum amount of time (seconds) for a search (-1 or 0 is unlimited). - aliases: ["searchtimelimit"] - type: int - version_added: '2.5.0' - ipaselinuxusermaporder: - description: The SELinux user map order (order in increasing priority of SELinux users). - aliases: ["selinuxusermaporder"] - type: list - elements: str - version_added: '3.7.0' - ipauserauthtype: - description: The authentication type to use by default. - aliases: ["userauthtype"] - choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"] - type: list - elements: str - version_added: '2.5.0' - ipausersearchfields: - description: A list of fields to search in when searching for users. 
- aliases: ["usersearchfields"] - type: list - elements: str - version_added: '2.5.0' -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled - community.general.ipa_config: - ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default login shell is bash - community.general.ipa_config: - ipadefaultloginshell: /bin/bash - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default e-mail domain is ansible.com - community.general.ipa_config: - ipadefaultemaildomain: ansible.com - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default primary group is set to ipausers - community.general.ipa_config: - ipadefaultprimarygroup: ipausers - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the group search fields are set to 'cn,description' - community.general.ipa_config: - ipagroupsearchfields: ['cn', 'description'] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the home directory location is set to /home - community.general.ipa_config: - ipahomesrootdir: /home - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default types of PAC supported for services is set to MS-PAC and PAD - community.general.ipa_config: - ipakrbauthzdata: ["MS-PAC", "PAD"] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the maximum user name length is set to 32 - community.general.ipa_config: - ipamaxusernamelength: 32 - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the password expiration notice is set to 4 days - community.general.ipa_config: - ipapwdexpadvnotify: 4 - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the search record limit is set to 100 - community.general.ipa_config: - ipasearchrecordslimit: 100 - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the search time limit is set to 2 seconds - community.general.ipa_config: - ipasearchtimelimit: 2 - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the default user auth type is password - community.general.ipa_config: - ipauserauthtype: ['password'] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the user search fields is set to 'uid,givenname,sn,ou,title' - community.general.ipa_config: - ipausersearchfields: ['uid', 'givenname', 'sn', 'ou', 'title'] - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the SELinux user map order is set - community.general.ipa_config: - ipaselinuxusermaporder: - - "guest_u:s0" - - "xguest_u:s0" - - "user_u:s0" - - "staff_u:s0-s0:c0.c1023" - - "unconfined_u:s0-s0:c0.c1023" - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret -''' - -RETURN = r''' -config: - description: Configuration as returned by IPA API. 
- returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class ConfigIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(ConfigIPAClient, self).__init__(module, host, port, protocol) - - def config_show(self): - return self._post_json(method='config_show', name=None) - - def config_mod(self, name, item): - return self._post_json(method='config_mod', name=name, item=item) - - -def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, - ipadefaultemaildomain=None, ipadefaultprimarygroup=None, - ipagroupsearchfields=None, ipahomesrootdir=None, - ipakrbauthzdata=None, ipamaxusernamelength=None, - ipapwdexpadvnotify=None, ipasearchrecordslimit=None, - ipasearchtimelimit=None, ipaselinuxusermaporder=None, - ipauserauthtype=None, ipausersearchfields=None): - config = {} - if ipaconfigstring is not None: - config['ipaconfigstring'] = ipaconfigstring - if ipadefaultloginshell is not None: - config['ipadefaultloginshell'] = ipadefaultloginshell - if ipadefaultemaildomain is not None: - config['ipadefaultemaildomain'] = ipadefaultemaildomain - if ipadefaultprimarygroup is not None: - config['ipadefaultprimarygroup'] = ipadefaultprimarygroup - if ipagroupsearchfields is not None: - config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields) - if ipahomesrootdir is not None: - config['ipahomesrootdir'] = ipahomesrootdir - if ipakrbauthzdata is not None: - config['ipakrbauthzdata'] = ipakrbauthzdata - if ipamaxusernamelength is not None: - config['ipamaxusernamelength'] = str(ipamaxusernamelength) - if ipapwdexpadvnotify is not None: - config['ipapwdexpadvnotify'] = str(ipapwdexpadvnotify) - if ipasearchrecordslimit is not None: - config['ipasearchrecordslimit'] = str(ipasearchrecordslimit) - if ipasearchtimelimit is not None: - config['ipasearchtimelimit'] = str(ipasearchtimelimit) - if ipaselinuxusermaporder is not None: - config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder) - if ipauserauthtype is not None: - config['ipauserauthtype'] = ipauserauthtype - if ipausersearchfields is not None: - config['ipausersearchfields'] = ','.join(ipausersearchfields) - - return config - - -def get_config_diff(client, ipa_config, module_config): - return client.get_diff(ipa_data=ipa_config, module_data=module_config) - - -def ensure(module, client): - module_config = get_config_dict( - ipaconfigstring=module.params.get('ipaconfigstring'), - ipadefaultloginshell=module.params.get('ipadefaultloginshell'), - ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), - ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'), - ipagroupsearchfields=module.params.get('ipagroupsearchfields'), - ipahomesrootdir=module.params.get('ipahomesrootdir'), - ipakrbauthzdata=module.params.get('ipakrbauthzdata'), - ipamaxusernamelength=module.params.get('ipamaxusernamelength'), - ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'), - ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'), - ipasearchtimelimit=module.params.get('ipasearchtimelimit'), - ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'), - ipauserauthtype=module.params.get('ipauserauthtype'), - ipausersearchfields=module.params.get('ipausersearchfields'), - ) - ipa_config = client.config_show() - diff = get_config_diff(client, 
ipa_config, module_config) - - changed = False - new_config = {} - for module_key in diff: - if module_config.get(module_key) != ipa_config.get(module_key, None): - changed = True - new_config.update({module_key: module_config.get(module_key)}) - - if changed and not module.check_mode: - client.config_mod(name=None, item=new_config) - - return changed, client.config_show() - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update( - ipaconfigstring=dict(type='list', elements='str', - choices=['AllowNThash', - 'KDC:Disable Last Success', - 'KDC:Disable Lockout', - 'KDC:Disable Default Preauth for SPNs'], - aliases=['configstring']), - ipadefaultloginshell=dict(type='str', aliases=['loginshell']), - ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), - ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']), - ipagroupsearchfields=dict(type='list', elements='str', - aliases=['groupsearchfields']), - ipahomesrootdir=dict(type='str', aliases=['homesrootdir']), - ipakrbauthzdata=dict(type='list', elements='str', - choices=['MS-PAC', 'PAD', 'nfs:NONE'], - aliases=['krbauthzdata']), - ipamaxusernamelength=dict(type='int', aliases=['maxusernamelength']), - ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']), - ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']), - ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']), - ipaselinuxusermaporder=dict(type='list', elements='str', - aliases=['selinuxusermaporder']), - ipauserauthtype=dict(type='list', elements='str', - aliases=['userauthtype'], - choices=["password", "radius", "otp", "pkinit", - "hardened", "disabled"]), - ipausersearchfields=dict(type='list', elements='str', - aliases=['usersearchfields']), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = ConfigIPAClient( - module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] - ) - - try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) - changed, user = ensure(module, client) - module.exit_json(changed=changed, user=user) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py deleted file mode 100644 index 73b66956..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py +++ /dev/null @@ -1,319 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_dnsrecord -author: Abhijeet Kasurde (@Akasurde) -short_description: Manage FreeIPA DNS records -description: -- Add, modify and delete an IPA DNS Record using IPA API. -options: - zone_name: - description: - - The DNS zone name to which DNS record needs to be managed. - required: true - type: str - record_name: - description: - - The DNS record name to manage. - required: true - aliases: ["name"] - type: str - record_type: - description: - - The type of DNS record name. 
- - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported. - - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5." - - "'SRV' and 'MX' are added in version 2.8." - required: false - default: 'A' - choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT'] - type: str - record_value: - description: - - Manage DNS record name with this value. - - In the case of 'A' or 'AAAA' record types, this will be the IP address. - - In the case of 'A6' record type, this will be the A6 Record data. - - In the case of 'CNAME' record type, this will be the hostname. - - In the case of 'DNAME' record type, this will be the DNAME target. - - In the case of 'PTR' record type, this will be the hostname. - - In the case of 'TXT' record type, this will be a text. - - In the case of 'SRV' record type, this will be a service record. - - In the case of 'MX' record type, this will be a mail exchanger record. - required: true - type: str - record_ttl: - description: - - Set the TTL for the record. - - Applies only when adding a new or changing the value of record_value. - required: false - type: int - state: - description: State to ensure - required: false - default: present - choices: ["absent", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure dns record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - record_name: vm-001 - record_type: 'AAAA' - record_value: '::1' - -- name: Ensure that dns record exists with a TTL - community.general.ipa_dnsrecord: - name: host02 - zone_name: example.com - record_type: 'AAAA' - record_value: '::1' - record_ttl: 300 - ipa_host: ipa.example.com - ipa_pass: topsecret - state: present - -- name: Ensure a PTR record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: 2.168.192.in-addr.arpa - record_name: 5 - record_type: 'PTR' - record_value: 'internal.ipa.example.com' - -- name: Ensure a TXT record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - record_name: _kerberos - record_type: 'TXT' - record_value: 'EXAMPLE.COM' - -- name: Ensure an SRV record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - record_name: _kerberos._udp.example.com - record_type: 'SRV' - record_value: '10 50 88 ipa.example.com' - -- name: Ensure an MX record is present - community.general.ipa_dnsrecord: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - record_name: '@' - record_type: 'MX' - record_value: '1 mailserver.example.com' - -- name: Ensure that dns record is removed - community.general.ipa_dnsrecord: - name: host01 - zone_name: example.com - record_type: 'AAAA' - record_value: '::1' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - state: absent -''' - -RETURN = r''' -dnsrecord: - description: DNS record as returned by IPA API. 
- returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class DNSRecordIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(DNSRecordIPAClient, self).__init__(module, host, port, protocol) - - def dnsrecord_find(self, zone_name, record_name): - if record_name == '@': - return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True}) - else: - return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True}) - - def dnsrecord_add(self, zone_name=None, record_name=None, details=None): - item = dict(idnsname=record_name) - if details['record_type'] == 'A': - item.update(a_part_ip_address=details['record_value']) - elif details['record_type'] == 'AAAA': - item.update(aaaa_part_ip_address=details['record_value']) - elif details['record_type'] == 'A6': - item.update(a6_part_data=details['record_value']) - elif details['record_type'] == 'CNAME': - item.update(cname_part_hostname=details['record_value']) - elif details['record_type'] == 'DNAME': - item.update(dname_part_target=details['record_value']) - elif details['record_type'] == 'PTR': - item.update(ptr_part_hostname=details['record_value']) - elif details['record_type'] == 'TXT': - item.update(txtrecord=details['record_value']) - elif details['record_type'] == 'SRV': - item.update(srvrecord=details['record_value']) - elif details['record_type'] == 'MX': - item.update(mxrecord=details['record_value']) - - if details.get('record_ttl'): - item.update(dnsttl=details['record_ttl']) - - return self._post_json(method='dnsrecord_add', name=zone_name, item=item) - - def dnsrecord_mod(self, zone_name=None, record_name=None, details=None): - item = get_dnsrecord_dict(details) - item.update(idnsname=record_name) - if details.get('record_ttl'): - item.update(dnsttl=details['record_ttl']) - return self._post_json(method='dnsrecord_mod', name=zone_name, item=item) - - def dnsrecord_del(self, zone_name=None, record_name=None, details=None): - item = get_dnsrecord_dict(details) - item.update(idnsname=record_name) - return self._post_json(method='dnsrecord_del', name=zone_name, item=item) - - -def get_dnsrecord_dict(details=None): - module_dnsrecord = dict() - if details['record_type'] == 'A' and details['record_value']: - module_dnsrecord.update(arecord=details['record_value']) - elif details['record_type'] == 'AAAA' and details['record_value']: - module_dnsrecord.update(aaaarecord=details['record_value']) - elif details['record_type'] == 'A6' and details['record_value']: - module_dnsrecord.update(a6record=details['record_value']) - elif details['record_type'] == 'CNAME' and details['record_value']: - module_dnsrecord.update(cnamerecord=details['record_value']) - elif details['record_type'] == 'DNAME' and details['record_value']: - module_dnsrecord.update(dnamerecord=details['record_value']) - elif details['record_type'] == 'PTR' and details['record_value']: - module_dnsrecord.update(ptrrecord=details['record_value']) - elif details['record_type'] == 'TXT' and details['record_value']: - module_dnsrecord.update(txtrecord=details['record_value']) - elif details['record_type'] == 'SRV' and details['record_value']: - module_dnsrecord.update(srvrecord=details['record_value']) - elif details['record_type'] == 'MX' and 
details['record_value']: - module_dnsrecord.update(mxrecord=details['record_value']) - - if details.get('record_ttl'): - module_dnsrecord.update(dnsttl=details['record_ttl']) - - return module_dnsrecord - - -def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord): - details = get_dnsrecord_dict(module_dnsrecord) - return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details) - - -def ensure(module, client): - zone_name = module.params['zone_name'] - record_name = module.params['record_name'] - record_ttl = module.params.get('record_ttl') - state = module.params['state'] - - ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name) - - module_dnsrecord = dict( - record_type=module.params['record_type'], - record_value=module.params['record_value'], - record_ttl=to_native(record_ttl, nonstring='passthru'), - ) - - # ttl is not required to change records - if module_dnsrecord['record_ttl'] is None: - module_dnsrecord.pop('record_ttl') - - changed = False - if state == 'present': - if not ipa_dnsrecord: - changed = True - if not module.check_mode: - client.dnsrecord_add(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) - else: - diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord) - if len(diff) > 0: - changed = True - if not module.check_mode: - client.dnsrecord_mod(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) - else: - if ipa_dnsrecord: - changed = True - if not module.check_mode: - client.dnsrecord_del(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) - - return changed, client.dnsrecord_find(zone_name, record_name) - - -def main(): - record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX'] - argument_spec = ipa_argument_spec() - argument_spec.update( - zone_name=dict(type='str', required=True), - record_name=dict(type='str', aliases=['name'], required=True), - record_type=dict(type='str', default='A', choices=record_types), - record_value=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - record_ttl=dict(type='int', required=False), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = DNSRecordIPAClient( - module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] - ) - - try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) - changed, record = ensure(module, client) - module.exit_json(changed=changed, record=record) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py deleted file mode 100644 index 33ae59e9..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com) -# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_dnszone -author: Fran 
Fitzpatrick (@fxfitz) -short_description: Manage FreeIPA DNS Zones -description: -- Add and delete an IPA DNS zone using IPA API -options: - zone_name: - description: - - The DNS zone name which needs to be managed. - required: true - type: str - state: - description: State to ensure - required: false - default: present - choices: ["absent", "present"] - type: str - dynamicupdate: - description: Apply dynamic update to zone. - default: false - type: bool - allowsyncptr: - description: Allow synchronization of forward and reverse records in the zone. - default: false - type: bool - version_added: 4.3.0 -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure dns zone is present - community.general.ipa_dnszone: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - -- name: Ensure dns zone is present and allows dynamic updates - community.general.ipa_dnszone: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - dynamicupdate: true - -- name: Ensure that dns zone is removed - community.general.ipa_dnszone: - zone_name: example.com - ipa_host: localhost - ipa_user: admin - ipa_pass: topsecret - state: absent - -- name: Ensure dns zone is present and allows sync - community.general.ipa_dnszone: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - zone_name: example.com - allowsyncptr: true -''' - -RETURN = r''' -zone: - description: DNS zone as returned by IPA API. - returned: always - type: dict -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class DNSZoneIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(DNSZoneIPAClient, self).__init__(module, host, port, protocol) - - def dnszone_find(self, zone_name, details=None): - items = {'all': 'true', - 'idnsname': zone_name, } - if details is not None: - items.update(details) - - return self._post_json( - method='dnszone_find', - name=zone_name, - item=items - ) - - def dnszone_add(self, zone_name=None, details=None): - items = {} - if details is not None: - items.update(details) - - return self._post_json( - method='dnszone_add', - name=zone_name, - item=items - ) - - def dnszone_mod(self, zone_name=None, details=None): - items = {} - if details is not None: - items.update(details) - - return self._post_json( - method='dnszone_mod', - name=zone_name, - item=items - ) - - def dnszone_del(self, zone_name=None, record_name=None, details=None): - return self._post_json( - method='dnszone_del', name=zone_name, item={}) - - -def ensure(module, client): - zone_name = module.params['zone_name'] - state = module.params['state'] - dynamicupdate = module.params['dynamicupdate'] - allowsyncptr = module.params['allowsyncptr'] - - changed = False - - # does zone exist - ipa_dnszone = client.dnszone_find(zone_name) - - if state == 'present': - if not ipa_dnszone: - - changed = True - if not module.check_mode: - client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) - elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper(): - changed = True - if not module.check_mode: - client.dnszone_mod(zone_name=zone_name, 
details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) - else: - changed = False - - # state is absent - else: - # check for generic zone existence - if ipa_dnszone: - changed = True - if not module.check_mode: - client.dnszone_del(zone_name=zone_name) - - return changed, client.dnszone_find(zone_name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(zone_name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - dynamicupdate=dict(type='bool', required=False, default=False), - allowsyncptr=dict(type='bool', required=False, default=False), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - ) - - client = DNSZoneIPAClient( - module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] - ) - - try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) - changed, zone = ensure(module, client) - module.exit_json(changed=changed, zone=zone) - except Exception as e: - module.fail_json(msg=to_native(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py deleted file mode 100644 index d6af57ba..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_group -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA group -description: -- Add, modify and delete group within IPA server -options: - append: - description: - - If C(yes), add the listed I(user) and I(group) to the group members. - - If C(no), only the listed I(user) and I(group) will be group members, removing any other members. - default: no - type: bool - version_added: 4.0.0 - cn: - description: - - Canonical name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ['name'] - type: str - description: - description: - - Description of the group. - type: str - external: - description: - - Allow adding external non-IPA members from trusted domains. - type: bool - gidnumber: - description: - - GID (use this option to set it manually). - aliases: ['gid'] - type: str - group: - description: - - List of group names assigned to this group. - - If I(append=no) and an empty list is passed all groups will be removed from this group. - - Groups that are already assigned but not passed will be removed. - - If I(append=yes) the listed groups will be assigned without removing other groups. - - If option is omitted assigned groups will not be checked or changed. - type: list - elements: str - nonposix: - description: - - Create as a non-POSIX group. - type: bool - user: - description: - - List of user names assigned to this group. - - If I(append=no) and an empty list is passed all users will be removed from this group. - - Users that are already assigned but not passed will be removed. - - If I(append=yes) the listed users will be assigned without removing other users. 
- - If option is omitted assigned users will not be checked or changed. - type: list - elements: str - state: - description: - - State to ensure - default: "present" - choices: ["absent", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure group is present - community.general.ipa_group: - name: oinstall - gidnumber: '54321' - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that groups sysops and appops are assigned to ops but no other group - community.general.ipa_group: - name: ops - group: - - sysops - - appops - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that users linus and larry are assigned to the group, but no other user - community.general.ipa_group: - name: sysops - user: - - linus - - larry - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that new starter named john is a member of the group, without removing other members - community.general.ipa_group: - name: developers - user: - - john - append: yes - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure group is absent - community.general.ipa_group: - name: sysops - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -group: - description: Group as returned by IPA API - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class GroupIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(GroupIPAClient, self).__init__(module, host, port, protocol) - - def group_find(self, name): - return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name}) - - def group_add(self, name, item): - return self._post_json(method='group_add', name=name, item=item) - - def group_mod(self, name, item): - return self._post_json(method='group_mod', name=name, item=item) - - def group_del(self, name): - return self._post_json(method='group_del', name=name) - - def group_add_member(self, name, item): - return self._post_json(method='group_add_member', name=name, item=item) - - def group_add_member_group(self, name, item): - return self.group_add_member(name=name, item={'group': item}) - - def group_add_member_user(self, name, item): - return self.group_add_member(name=name, item={'user': item}) - - def group_remove_member(self, name, item): - return self._post_json(method='group_remove_member', name=name, item=item) - - def group_remove_member_group(self, name, item): - return self.group_remove_member(name=name, item={'group': item}) - - def group_remove_member_user(self, name, item): - return self.group_remove_member(name=name, item={'user': item}) - - -def get_group_dict(description=None, external=None, gid=None, nonposix=None): - group = {} - if description is not None: - group['description'] = description - if external is not None: - group['external'] = external - if gid is not None: - group['gidnumber'] = gid - if nonposix is not None: - group['nonposix'] = nonposix - return group - - -def get_group_diff(client, ipa_group, module_group): - data = [] - # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed. 
- if 'nonposix' in module_group: - # Only non-posix groups can be changed to posix - if not module_group['nonposix'] and ipa_group.get('nonposix'): - module_group['posix'] = True - del module_group['nonposix'] - - if 'external' in module_group: - if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'): - del module_group['external'] - - return client.get_diff(ipa_data=ipa_group, module_data=module_group) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - group = module.params['group'] - user = module.params['user'] - append = module.params['append'] - - module_group = get_group_dict(description=module.params['description'], external=module.params['external'], - gid=module.params['gidnumber'], nonposix=module.params['nonposix']) - ipa_group = client.group_find(name=name) - - changed = False - if state == 'present': - if not ipa_group: - changed = True - if not module.check_mode: - ipa_group = client.group_add(name, item=module_group) - else: - diff = get_group_diff(client, ipa_group, module_group) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_group.get(key) - client.group_mod(name=name, item=data) - - if group is not None: - changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group, - client.group_add_member_group, - client.group_remove_member_group, - append=append) or changed - - if user is not None: - changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user, - client.group_add_member_user, - client.group_remove_member_user, - append=append) or changed - - else: - if ipa_group: - changed = True - if not module.check_mode: - client.group_del(name) - - return changed, client.group_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - external=dict(type='bool'), - gidnumber=dict(type='str', aliases=['gid']), - group=dict(type='list', elements='str'), - nonposix=dict(type='bool'), - state=dict(type='str', default='present', choices=['present', 'absent']), - user=dict(type='list', elements='str'), - append=dict(type='bool', default=False)) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - ) - - client = GroupIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, group = ensure(module, client) - module.exit_json(changed=changed, group=group) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py deleted file mode 100644 index 5f0704d5..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py +++ /dev/null @@ -1,355 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_hbacrule -author: Thomas Krahn (@Nosmoht) 
-short_description: Manage FreeIPA HBAC rule -description: -- Add, modify or delete an IPA HBAC rule using IPA API. -options: - cn: - description: - - Canonical name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str - description: - description: Description - type: str - host: - description: - - List of host names to assign. - - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. - required: false - type: list - elements: str - hostcategory: - description: Host category - choices: ['all'] - type: str - hostgroup: - description: - - List of hostgroup names to assign. - - If an empty list is passed all hostgroups will be removed from the rule. - - If option is omitted hostgroups will not be checked or changed. - type: list - elements: str - service: - description: - - List of service names to assign. - - If an empty list is passed all services will be removed from the rule. - - If option is omitted services will not be checked or changed. - type: list - elements: str - servicecategory: - description: Service category - choices: ['all'] - type: str - servicegroup: - description: - - List of service group names to assign. - - If an empty list is passed all assigned service groups will be removed from the rule. - - If option is omitted service groups will not be checked or changed. - type: list - elements: str - sourcehost: - description: - - List of source host names to assign. - - If an empty list is passed all assigned source hosts will be removed from the rule. - - If option is omitted source hosts will not be checked or changed. - type: list - elements: str - sourcehostcategory: - description: Source host category - choices: ['all'] - type: str - sourcehostgroup: - description: - - List of source host group names to assign. - - If an empty list is passed all assigned source host groups will be removed from the rule. - - If option is omitted source host groups will not be checked or changed. - type: list - elements: str - state: - description: State to ensure - default: "present" - choices: ["absent", "disabled", "enabled", "present"] - type: str - user: - description: - - List of user names to assign. - - If an empty list is passed all assigned users will be removed from the rule. - - If option is omitted users will not be checked or changed. - type: list - elements: str - usercategory: - description: User category - choices: ['all'] - type: str - usergroup: - description: - - List of user group names to assign. - - If an empty list is passed all assigned user groups will be removed from the rule. - - If option is omitted user groups will not be checked or changed. 
- type: list - elements: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure rule to allow all users to access any host from any host - community.general.ipa_hbacrule: - name: allow_all - description: Allow all users to access any host from any host - hostcategory: all - servicecategory: all - usercategory: all - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure rule with certain limitations - community.general.ipa_hbacrule: - name: allow_all_developers_access_to_db - description: Allow all developers to access any database from any host - hostgroup: - - db-server - usergroup: - - developers - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure rule is absent - community.general.ipa_hbacrule: - name: rule_to_be_deleted - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -hbacrule: - description: HBAC rule as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class HBACRuleIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(HBACRuleIPAClient, self).__init__(module, host, port, protocol) - - def hbacrule_find(self, name): - return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name}) - - def hbacrule_add(self, name, item): - return self._post_json(method='hbacrule_add', name=name, item=item) - - def hbacrule_mod(self, name, item): - return self._post_json(method='hbacrule_mod', name=name, item=item) - - def hbacrule_del(self, name): - return self._post_json(method='hbacrule_del', name=name) - - def hbacrule_add_host(self, name, item): - return self._post_json(method='hbacrule_add_host', name=name, item=item) - - def hbacrule_remove_host(self, name, item): - return self._post_json(method='hbacrule_remove_host', name=name, item=item) - - def hbacrule_add_service(self, name, item): - return self._post_json(method='hbacrule_add_service', name=name, item=item) - - def hbacrule_remove_service(self, name, item): - return self._post_json(method='hbacrule_remove_service', name=name, item=item) - - def hbacrule_add_user(self, name, item): - return self._post_json(method='hbacrule_add_user', name=name, item=item) - - def hbacrule_remove_user(self, name, item): - return self._post_json(method='hbacrule_remove_user', name=name, item=item) - - def hbacrule_add_sourcehost(self, name, item): - return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item) - - def hbacrule_remove_sourcehost(self, name, item): - return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item) - - -def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None, - sourcehostcategory=None, - usercategory=None): - data = {} - if description is not None: - data['description'] = description - if hostcategory is not None: - data['hostcategory'] = hostcategory - if ipaenabledflag is not None: - data['ipaenabledflag'] = ipaenabledflag - if servicecategory is not None: - data['servicecategory'] = servicecategory - if sourcehostcategory is not None: - data['sourcehostcategory'] = sourcehostcategory - if usercategory is not None: - 
data['usercategory'] = usercategory - return data - - -def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule): - return client.get_diff(ipa_data=ipa_hbcarule, module_data=module_hbcarule) - - -def ensure(module, client): - name = module.params['cn'] - state = module.params['state'] - - if state in ['present', 'enabled']: - ipaenabledflag = 'TRUE' - else: - ipaenabledflag = 'FALSE' - - host = module.params['host'] - hostcategory = module.params['hostcategory'] - hostgroup = module.params['hostgroup'] - service = module.params['service'] - servicecategory = module.params['servicecategory'] - servicegroup = module.params['servicegroup'] - sourcehost = module.params['sourcehost'] - sourcehostcategory = module.params['sourcehostcategory'] - sourcehostgroup = module.params['sourcehostgroup'] - user = module.params['user'] - usercategory = module.params['usercategory'] - usergroup = module.params['usergroup'] - - module_hbacrule = get_hbacrule_dict(description=module.params['description'], - hostcategory=hostcategory, - ipaenabledflag=ipaenabledflag, - servicecategory=servicecategory, - sourcehostcategory=sourcehostcategory, - usercategory=usercategory) - ipa_hbacrule = client.hbacrule_find(name=name) - - changed = False - if state in ['present', 'enabled', 'disabled']: - if not ipa_hbacrule: - changed = True - if not module.check_mode: - ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule) - else: - diff = get_hbcarule_diff(client, ipa_hbacrule, module_hbacrule) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_hbacrule.get(key) - client.hbacrule_mod(name=name, item=data) - - if host is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host, - client.hbacrule_add_host, - client.hbacrule_remove_host, 'host') or changed - - if hostgroup is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup, - client.hbacrule_add_host, - client.hbacrule_remove_host, 'hostgroup') or changed - - if service is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service, - client.hbacrule_add_service, - client.hbacrule_remove_service, 'hbacsvc') or changed - - if servicegroup is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []), - servicegroup, - client.hbacrule_add_service, - client.hbacrule_remove_service, 'hbacsvcgroup') or changed - - if sourcehost is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost, - client.hbacrule_add_sourcehost, - client.hbacrule_remove_sourcehost, 'host') or changed - - if sourcehostgroup is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup, - client.hbacrule_add_sourcehost, - client.hbacrule_remove_sourcehost, 'hostgroup') or changed - - if user is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user, - client.hbacrule_add_user, - client.hbacrule_remove_user, 'user') or changed - - if usergroup is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup, - client.hbacrule_add_user, - client.hbacrule_remove_user, 'group') or changed - else: - if ipa_hbacrule: - changed = True - if not module.check_mode: - client.hbacrule_del(name=name) - - return changed, client.hbacrule_find(name=name) - - -def main(): - argument_spec = 
ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - host=dict(type='list', elements='str'), - hostcategory=dict(type='str', choices=['all']), - hostgroup=dict(type='list', elements='str'), - service=dict(type='list', elements='str'), - servicecategory=dict(type='str', choices=['all']), - servicegroup=dict(type='list', elements='str'), - sourcehost=dict(type='list', elements='str'), - sourcehostcategory=dict(type='str', choices=['all']), - sourcehostgroup=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - user=dict(type='list', elements='str'), - usercategory=dict(type='str', choices=['all']), - usergroup=dict(type='list', elements='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True - ) - - client = HBACRuleIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, hbacrule = ensure(module, client) - module.exit_json(changed=changed, hbacrule=hbacrule) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py deleted file mode 100644 index 25c65f0b..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_host -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA host -description: -- Add, modify and delete an IPA host using IPA API. -options: - fqdn: - description: - - Fully qualified domain name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str - description: - description: - - A description of this host. - type: str - force: - description: - - Force host name even if not in DNS. - required: false - type: bool - ip_address: - description: - - Add the host to DNS with this IP address. - type: str - mac_address: - description: - - List of Hardware MAC address(es) of this host. - - If option is omitted MAC addresses will not be checked or changed. - - If an empty list is passed all assigned MAC addresses will be removed. - - MAC addresses that are already assigned but not passed will be removed. - aliases: ["macaddress"] - type: list - elements: str - ns_host_location: - description: - - Host location (e.g. "Lab 2") - aliases: ["nshostlocation"] - type: str - ns_hardware_platform: - description: - - Host hardware platform (e.g. "Lenovo T61") - aliases: ["nshardwareplatform"] - type: str - ns_os_version: - description: - - Host operating system and version (e.g. "Fedora 9") - aliases: ["nsosversion"] - type: str - user_certificate: - description: - - List of Base-64 encoded server certificates. - - If option is omitted certificates will not be checked or changed. 
- - If an empty list is passed all assigned certificates will be removed. - - Certificates already assigned but not passed will be removed. - aliases: ["usercertificate"] - type: list - elements: str - state: - description: State to ensure. - default: present - choices: ["absent", "disabled", "enabled", "present"] - type: str - update_dns: - description: - - If set C("True") with state as C("absent"), then removes DNS records of the host managed by FreeIPA DNS. - - This option has no effect for states other than "absent". - type: bool - random_password: - description: Generate a random password to be used in bulk enrollment. - type: bool -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure host is present - community.general.ipa_host: - name: host01.example.com - description: Example host - ip_address: 192.168.0.123 - ns_host_location: Lab - ns_os_version: CentOS 7 - ns_hardware_platform: Lenovo T61 - mac_address: - - "08:00:27:E3:B1:2D" - - "52:54:00:BD:97:1E" - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Generate a random password for bulk enrolment - community.general.ipa_host: - name: host01.example.com - description: Example host - ip_address: 192.168.0.123 - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - validate_certs: False - random_password: True - -- name: Ensure host is disabled - community.general.ipa_host: - name: host01.example.com - state: disabled - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that all user certificates are removed - community.general.ipa_host: - name: host01.example.com - user_certificate: [] - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure host is absent - community.general.ipa_host: - name: host01.example.com - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure host and its DNS record is absent - community.general.ipa_host: - name: host01.example.com - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - update_dns: True -''' - -RETURN = r''' -host: - description: Host as returned by IPA API. 
- returned: always - type: dict -host_diff: - description: List of options that differ and would be changed - returned: if check mode and a difference is found - type: list -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class HostIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(HostIPAClient, self).__init__(module, host, port, protocol) - - def host_show(self, name): - return self._post_json(method='host_show', name=name) - - def host_find(self, name): - return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name}) - - def host_add(self, name, host): - return self._post_json(method='host_add', name=name, item=host) - - def host_mod(self, name, host): - return self._post_json(method='host_mod', name=name, item=host) - - def host_del(self, name, update_dns): - return self._post_json(method='host_del', name=name, item={'updatedns': update_dns}) - - def host_disable(self, name): - return self._post_json(method='host_disable', name=name) - - -def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None, - ns_os_version=None, user_certificate=None, mac_address=None, random_password=None): - data = {} - if description is not None: - data['description'] = description - if force is not None: - data['force'] = force - if ip_address is not None: - data['ip_address'] = ip_address - if ns_host_location is not None: - data['nshostlocation'] = ns_host_location - if ns_hardware_platform is not None: - data['nshardwareplatform'] = ns_hardware_platform - if ns_os_version is not None: - data['nsosversion'] = ns_os_version - if user_certificate is not None: - data['usercertificate'] = [{"__base64__": item} for item in user_certificate] - if mac_address is not None: - data['macaddress'] = mac_address - if random_password is not None: - data['random'] = random_password - return data - - -def get_host_diff(client, ipa_host, module_host): - non_updateable_keys = ['force', 'ip_address'] - if not module_host.get('random'): - non_updateable_keys.append('random') - for key in non_updateable_keys: - if key in module_host: - del module_host[key] - - return client.get_diff(ipa_data=ipa_host, module_data=module_host) - - -def ensure(module, client): - name = module.params['fqdn'] - state = module.params['state'] - - ipa_host = client.host_find(name=name) - module_host = get_host_dict(description=module.params['description'], - force=module.params['force'], ip_address=module.params['ip_address'], - ns_host_location=module.params['ns_host_location'], - ns_hardware_platform=module.params['ns_hardware_platform'], - ns_os_version=module.params['ns_os_version'], - user_certificate=module.params['user_certificate'], - mac_address=module.params['mac_address'], - random_password=module.params.get('random_password'), - ) - changed = False - if state in ['present', 'enabled', 'disabled']: - if not ipa_host: - changed = True - if not module.check_mode: - # OTP password generated by FreeIPA is visible only for host_add command - # so, return directly from here. 
- return changed, client.host_add(name=name, host=module_host) - else: - diff = get_host_diff(client, ipa_host, module_host) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_host.get(key) - ipa_host_show = client.host_show(name=name) - if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'): - client.host_disable(name=name) - return changed, client.host_mod(name=name, host=data) - - else: - if ipa_host: - changed = True - update_dns = module.params.get('update_dns', False) - if not module.check_mode: - client.host_del(name=name, update_dns=update_dns) - - return changed, client.host_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(description=dict(type='str'), - fqdn=dict(type='str', required=True, aliases=['name']), - force=dict(type='bool'), - ip_address=dict(type='str'), - ns_host_location=dict(type='str', aliases=['nshostlocation']), - ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']), - ns_os_version=dict(type='str', aliases=['nsosversion']), - user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'), - mac_address=dict(type='list', aliases=['macaddress'], elements='str'), - update_dns=dict(type='bool'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - random_password=dict(type='bool', no_log=False),) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = HostIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, host = ensure(module, client) - module.exit_json(changed=changed, host=host) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py deleted file mode 100644 index 9d5c6f99..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_hostgroup -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA host-group -description: -- Add, modify and delete an IPA host-group using IPA API. -options: - cn: - description: - - Name of host-group. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str - description: - description: - - Description. - type: str - host: - description: - - List of hosts that belong to the host-group. - - If an empty list is passed all hosts will be removed from the group. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the group. - type: list - elements: str - hostgroup: - description: - - List of host-groups that belong to that host-group.
- - If an empty list is passed all host-groups will be removed from the group. - - If option is omitted host-groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the group. - type: list - elements: str - state: - description: - - State to ensure. - default: "present" - choices: ["absent", "disabled", "enabled", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure host-group databases is present - community.general.ipa_hostgroup: - name: databases - state: present - host: - - db.example.com - hostgroup: - - mysql-server - - oracle-server - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure host-group databases is absent - community.general.ipa_hostgroup: - name: databases - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -hostgroup: - description: Hostgroup as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class HostGroupIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(HostGroupIPAClient, self).__init__(module, host, port, protocol) - - def hostgroup_find(self, name): - return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name}) - - def hostgroup_add(self, name, item): - return self._post_json(method='hostgroup_add', name=name, item=item) - - def hostgroup_mod(self, name, item): - return self._post_json(method='hostgroup_mod', name=name, item=item) - - def hostgroup_del(self, name): - return self._post_json(method='hostgroup_del', name=name) - - def hostgroup_add_member(self, name, item): - return self._post_json(method='hostgroup_add_member', name=name, item=item) - - def hostgroup_add_host(self, name, item): - return self.hostgroup_add_member(name=name, item={'host': item}) - - def hostgroup_add_hostgroup(self, name, item): - return self.hostgroup_add_member(name=name, item={'hostgroup': item}) - - def hostgroup_remove_member(self, name, item): - return self._post_json(method='hostgroup_remove_member', name=name, item=item) - - def hostgroup_remove_host(self, name, item): - return self.hostgroup_remove_member(name=name, item={'host': item}) - - def hostgroup_remove_hostgroup(self, name, item): - return self.hostgroup_remove_member(name=name, item={'hostgroup': item}) - - -def get_hostgroup_dict(description=None): - data = {} - if description is not None: - data['description'] = description - return data - - -def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup): - return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup) - - -def ensure(module, client): - name = module.params['cn'] - state = module.params['state'] - host = module.params['host'] - hostgroup = module.params['hostgroup'] - - ipa_hostgroup = client.hostgroup_find(name=name) - module_hostgroup = get_hostgroup_dict(description=module.params['description']) - - changed = False - if state == 'present': - if not ipa_hostgroup: - changed = True - if not module.check_mode: - ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup) - else: - diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup) - if len(diff) > 
0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_hostgroup.get(key) - client.hostgroup_mod(name=name, item=data) - - if host is not None: - changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host], - client.hostgroup_add_host, client.hostgroup_remove_host) or changed - - if hostgroup is not None: - changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []), - [item.lower() for item in hostgroup], - client.hostgroup_add_hostgroup, - client.hostgroup_remove_hostgroup) or changed - - else: - if ipa_hostgroup: - changed = True - if not module.check_mode: - client.hostgroup_del(name=name) - - return changed, client.hostgroup_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - host=dict(type='list', elements='str'), - hostgroup=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = HostGroupIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, hostgroup = ensure(module, client) - module.exit_json(changed=changed, hostgroup=hostgroup) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_otpconfig.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_otpconfig.py deleted file mode 100644 index 9a10baec..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_otpconfig.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Ansible Project -# Heavily influenced from Fran Fitzpatrick ipa_config module -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_otpconfig -author: justchris1 (@justchris1) -short_description: Manage FreeIPA OTP Configuration Settings -version_added: 2.5.0 -description: -- Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords). -options: - ipatokentotpauthwindow: - description: TOTP authentication window in seconds. - aliases: ["totpauthwindow"] - type: int - ipatokentotpsyncwindow: - description: TOTP synchronization window in seconds. - aliases: ["totpsyncwindow"] - type: int - ipatokenhotpauthwindow: - description: HOTP authentication window in number of hops. - aliases: ["hotpauthwindow"] - type: int - ipatokenhotpsyncwindow: - description: HOTP synchronization window in hops. 
- aliases: ["hotpsyncwindow"] - type: int -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure the TOTP authentication window is set to 300 seconds - community.general.ipa_otpconfig: - ipatokentotpauthwindow: '300' - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the TOTP synchronization window is set to 86400 seconds - community.general.ipa_otpconfig: - ipatokentotpsyncwindow: '86400' - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the HOTP authentication window is set to 10 hops - community.general.ipa_otpconfig: - ipatokenhotpauthwindow: '10' - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret - -- name: Ensure the HOTP synchronization window is set to 100 hops - community.general.ipa_otpconfig: - ipatokenhotpsyncwindow: '100' - ipa_host: localhost - ipa_user: admin - ipa_pass: supersecret -''' - -RETURN = r''' -otpconfig: - description: OTP configuration as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class OTPConfigIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(OTPConfigIPAClient, self).__init__(module, host, port, protocol) - - def otpconfig_show(self): - return self._post_json(method='otpconfig_show', name=None) - - def otpconfig_mod(self, name, item): - return self._post_json(method='otpconfig_mod', name=name, item=item) - - -def get_otpconfig_dict(ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None, - ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None): - - config = {} - if ipatokentotpauthwindow is not None: - config['ipatokentotpauthwindow'] = str(ipatokentotpauthwindow) - if ipatokentotpsyncwindow is not None: - config['ipatokentotpsyncwindow'] = str(ipatokentotpsyncwindow) - if ipatokenhotpauthwindow is not None: - config['ipatokenhotpauthwindow'] = str(ipatokenhotpauthwindow) - if ipatokenhotpsyncwindow is not None: - config['ipatokenhotpsyncwindow'] = str(ipatokenhotpsyncwindow) - - return config - - -def get_otpconfig_diff(client, ipa_config, module_config): - return client.get_diff(ipa_data=ipa_config, module_data=module_config) - - -def ensure(module, client): - module_otpconfig = get_otpconfig_dict( - ipatokentotpauthwindow=module.params.get('ipatokentotpauthwindow'), - ipatokentotpsyncwindow=module.params.get('ipatokentotpsyncwindow'), - ipatokenhotpauthwindow=module.params.get('ipatokenhotpauthwindow'), - ipatokenhotpsyncwindow=module.params.get('ipatokenhotpsyncwindow'), - ) - ipa_otpconfig = client.otpconfig_show() - diff = get_otpconfig_diff(client, ipa_otpconfig, module_otpconfig) - - changed = False - new_otpconfig = {} - for module_key in diff: - if module_otpconfig.get(module_key) != ipa_otpconfig.get(module_key, None): - changed = True - new_otpconfig.update({module_key: module_otpconfig.get(module_key)}) - - if changed and not module.check_mode: - client.otpconfig_mod(name=None, item=new_otpconfig) - - return changed, client.otpconfig_show() - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update( - ipatokentotpauthwindow=dict(type='int', aliases=['totpauthwindow'], no_log=False), - ipatokentotpsyncwindow=dict(type='int', aliases=['totpsyncwindow'], no_log=False), - 
ipatokenhotpauthwindow=dict(type='int', aliases=['hotpauthwindow'], no_log=False), - ipatokenhotpsyncwindow=dict(type='int', aliases=['hotpsyncwindow'], no_log=False), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - client = OTPConfigIPAClient( - module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] - ) - - try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) - changed, otpconfig = ensure(module, client) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, otpconfig=otpconfig) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_otptoken.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_otptoken.py deleted file mode 100644 index 4027a1c4..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_otptoken.py +++ /dev/null @@ -1,527 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_otptoken -author: justchris1 (@justchris1) -short_description: Manage FreeIPA OTPs -version_added: 2.5.0 -description: -- Add, modify, and delete One Time Passwords in IPA. -options: - uniqueid: - description: Unique ID of the token in IPA. - required: true - aliases: ["name"] - type: str - newuniqueid: - description: If specified, the unique id specified will be changed to this. - type: str - otptype: - description: - - Type of OTP. - - "B(Note:) Cannot be modified after OTP is created." - type: str - choices: [ totp, hotp ] - secretkey: - description: - - Token secret (Base64). - - If OTP is created and this is not specified, a random secret will be generated by IPA. - - "B(Note:) Cannot be modified after OTP is created." - type: str - description: - description: Description of the token (informational only). - type: str - owner: - description: Assigned user of the token. - type: str - enabled: - description: Mark the token as enabled (default C(true)). - default: true - type: bool - notbefore: - description: - - First date/time the token can be used. - - In the format C(YYYYMMddHHmmss). - - For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22. - type: str - notafter: - description: - - Last date/time the token can be used. - - In the format C(YYYYMMddHHmmss). - - For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22. - type: str - vendor: - description: Token vendor name (informational only). - type: str - model: - description: Token model (informational only). - type: str - serial: - description: Token serial (informational only). - type: str - state: - description: State to ensure. - choices: ['present', 'absent'] - default: 'present' - type: str - algorithm: - description: - - Token hash algorithm. - - "B(Note:) Cannot be modified after OTP is created." - choices: ['sha1', 'sha256', 'sha384', 'sha512'] - type: str - digits: - description: - - Number of digits each token code will have. - - "B(Note:) Cannot be modified after OTP is created." 
- choices: [ 6, 8 ] - type: int - offset: - description: - - TOTP token / IPA server time difference. - - "B(Note:) Cannot be modified after OTP is created." - type: int - interval: - description: - - Length of TOTP token code validity in seconds. - - "B(Note:) Cannot be modified after OTP is created." - type: int - counter: - description: - - Initial counter for the HOTP token. - - "B(Note:) Cannot be modified after OTP is created." - type: int -extends_documentation_fragment: -- community.general.ipa.documentation -''' - -EXAMPLES = r''' -- name: Create a totp for pinky, allowing the IPA server to generate using defaults - community.general.ipa_otptoken: - uniqueid: Token123 - otptype: totp - owner: pinky - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Create a 8 digit hotp for pinky with sha256 with specified validity times - community.general.ipa_otptoken: - uniqueid: Token123 - enabled: true - otptype: hotp - digits: 8 - secretkey: UMKSIER00zT2T2tWMUlTRmNlekRCbFQvWFBVZUh2dElHWGR6T3VUR3IzK2xjaFk9 - algorithm: sha256 - notbefore: 20180121182123 - notafter: 20220121182123 - owner: pinky - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Update Token123 to indicate a vendor, model, serial number (info only), and description - community.general.ipa_otptoken: - uniqueid: Token123 - vendor: Acme - model: acme101 - serial: SerialNumber1 - description: Acme OTP device - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Disable Token123 - community.general.ipa_otptoken: - uniqueid: Token123 - enabled: false - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Rename Token123 to TokenABC and enable it - community.general.ipa_otptoken: - uniqueid: Token123 - newuniqueid: TokenABC - enabled: true - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -otptoken: - description: OTP Token as returned by IPA API - returned: always - type: dict -''' - -import base64 -import traceback - -from ansible.module_utils.basic import AnsibleModule, sanitize_keys -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class OTPTokenIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(OTPTokenIPAClient, self).__init__(module, host, port, protocol) - - def otptoken_find(self, name): - return self._post_json(method='otptoken_find', name=None, item={'all': True, - 'ipatokenuniqueid': name, - 'timelimit': '0', - 'sizelimit': '0'}) - - def otptoken_add(self, name, item): - return self._post_json(method='otptoken_add', name=name, item=item) - - def otptoken_mod(self, name, item): - return self._post_json(method='otptoken_mod', name=name, item=item) - - def otptoken_del(self, name): - return self._post_json(method='otptoken_del', name=name) - - -def base64_to_base32(base64_string): - """Converts base64 string to base32 string""" - b32_string = base64.b32encode(base64.b64decode(base64_string)).decode('ascii') - return b32_string - - -def base32_to_base64(base32_string): - """Converts base32 string to base64 string""" - b64_string = base64.b64encode(base64.b32decode(base32_string)).decode('ascii') - return b64_string - - -def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=None, secretkey=None, description=None, owner=None, - enabled=None, notbefore=None, notafter=None, vendor=None, - model=None, 
serial=None, algorithm=None, digits=None, offset=None, - interval=None, counter=None): - """Create the dictionary of settings passed in""" - - otptoken = {} - if uniqueid is not None: - otptoken[ansible_to_ipa['uniqueid']] = uniqueid - if newuniqueid is not None: - otptoken[ansible_to_ipa['newuniqueid']] = newuniqueid - if otptype is not None: - otptoken[ansible_to_ipa['otptype']] = otptype.upper() - if secretkey is not None: - # For some unknown reason, while IPA returns the secret in base64, - # it wants the secret passed in as base32. This makes it more difficult - # for comparison (does 'current' equal to 'new'). Moreover, this may - # cause some subtle issue in a playbook as the output is encoded - # in a different way than if it was passed in as a parameter. For - # these reasons, have the module standardize on base64 input (as parameter) - # and output (from IPA). - otptoken[ansible_to_ipa['secretkey']] = base64_to_base32(secretkey) - if description is not None: - otptoken[ansible_to_ipa['description']] = description - if owner is not None: - otptoken[ansible_to_ipa['owner']] = owner - if enabled is not None: - otptoken[ansible_to_ipa['enabled']] = 'FALSE' if enabled else 'TRUE' - if notbefore is not None: - otptoken[ansible_to_ipa['notbefore']] = notbefore + 'Z' - if notafter is not None: - otptoken[ansible_to_ipa['notafter']] = notafter + 'Z' - if vendor is not None: - otptoken[ansible_to_ipa['vendor']] = vendor - if model is not None: - otptoken[ansible_to_ipa['model']] = model - if serial is not None: - otptoken[ansible_to_ipa['serial']] = serial - if algorithm is not None: - otptoken[ansible_to_ipa['algorithm']] = algorithm - if digits is not None: - otptoken[ansible_to_ipa['digits']] = str(digits) - if offset is not None: - otptoken[ansible_to_ipa['offset']] = str(offset) - if interval is not None: - otptoken[ansible_to_ipa['interval']] = str(interval) - if counter is not None: - otptoken[ansible_to_ipa['counter']] = str(counter) - - return otptoken - - -def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible): - """Transform the output received by IPA to a format more friendly - before it is returned to the user. IPA returns even simple - strings as a list of strings. It also returns bools and - int as string. This function cleans that up before return. - """ - updated_otptoken = ipa_otptoken - - # Used to hold values that will be sanitized from output as no_log. - # For the case where secretkey is not specified at the module, but - # is passed back from IPA. - sanitize_strings = set() - - # Rename the IPA parameters to the more friendly ansible module names for them - for ipa_parameter in ipa_to_ansible: - if ipa_parameter in ipa_otptoken: - updated_otptoken[ipa_to_ansible[ipa_parameter]] = ipa_otptoken[ipa_parameter] - updated_otptoken.pop(ipa_parameter) - - # Change the type from IPA's list of string to the appropriate return value type - # based on field. By default, assume they should be strings. 
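# As a sketch of the normalization the loop below performs (the raw shape is
# assumed from the comment above, not taken from a live IPA server):
#
#     raw = {'digits': ['6'], 'interval': ['30'], 'description': ['my token']}
#     # after the loop: {'digits': 6, 'interval': 30, 'description': 'my token'}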
- for ansible_parameter in ansible_to_ipa: - if ansible_parameter in updated_otptoken: - if isinstance(updated_otptoken[ansible_parameter], list) and len(updated_otptoken[ansible_parameter]) == 1: - if ansible_parameter in ['digits', 'offset', 'interval', 'counter']: - updated_otptoken[ansible_parameter] = int(updated_otptoken[ansible_parameter][0]) - elif ansible_parameter == 'enabled': - updated_otptoken[ansible_parameter] = bool(updated_otptoken[ansible_parameter][0]) - else: - updated_otptoken[ansible_parameter] = updated_otptoken[ansible_parameter][0] - - if 'secretkey' in updated_otptoken: - if isinstance(updated_otptoken['secretkey'], dict): - if '__base64__' in updated_otptoken['secretkey']: - sanitize_strings.add(updated_otptoken['secretkey']['__base64__']) - b64key = updated_otptoken['secretkey']['__base64__'] - updated_otptoken.pop('secretkey') - updated_otptoken['secretkey'] = b64key - sanitize_strings.add(b64key) - elif '__base32__' in updated_otptoken['secretkey']: - sanitize_strings.add(updated_otptoken['secretkey']['__base32__']) - b32key = updated_otptoken['secretkey']['__base32__'] - b64key = base32_to_base64(b32key) - updated_otptoken.pop('secretkey') - updated_otptoken['secretkey'] = b64key - sanitize_strings.add(b32key) - sanitize_strings.add(b64key) - - return updated_otptoken, sanitize_strings - - -def validate_modifications(ansible_to_ipa, module, ipa_otptoken, - module_otptoken, unmodifiable_after_creation): - """Checks to see if the requested modifications are valid. Some elements - cannot be modified after initial creation. However, we still want to - validate arguments that are specified, but are not different from what - is currently set on the server. - """ - - modifications_valid = True - - for parameter in unmodifiable_after_creation: - if ansible_to_ipa[parameter] in module_otptoken and ansible_to_ipa[parameter] in ipa_otptoken: - mod_value = module_otptoken[ansible_to_ipa[parameter]] - - # For some unknown reason, the returns from IPA put almost all - # values in a list, even though passing them in a list (even of - # length 1) will be rejected. The module values for all elements - # other than type (totp or hotp) have this happen. - if parameter == 'otptype': - ipa_value = ipa_otptoken[ansible_to_ipa[parameter]] - else: - if len(ipa_otptoken[ansible_to_ipa[parameter]]) != 1: - module.fail_json(msg=("Invariant fail: Return value from IPA is not a list " + - "of length 1. Please open a bug report for the module.")) - if parameter == 'secretkey': - # We stored the secret key in base32 since we had assumed that would need to - # be the format if we were contacting IPA to create it. However, we are - # now comparing it against what is already set in the IPA server, so convert - # back to base64 for comparison. - mod_value = base32_to_base64(mod_value) - - # For the secret key, it is even more specific in that the key is returned - # in a dict, in the list, as the __base64__ entry for the IPA response.
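# For illustration, the response shape this branch expects (assumed from the
# comments above rather than captured from a live server):
#
#     ipa_otptoken['ipatokenotpkey'] == [{'__base64__': 'SGVsbG8gV29ybGQh'}]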
- if '__base64__' in ipa_otptoken[ansible_to_ipa[parameter]][0]: - ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__'] - elif '__base32__' in ipa_otptoken[ansible_to_ipa[parameter]][0]: - b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base32__'] - b64key = base32_to_base64(b32key) - ipa_value = b64key - else: - ipa_value = None - else: - ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0] - - if mod_value != ipa_value: - modifications_valid = False - fail_message = ("Parameter '" + parameter + "' cannot be changed once " + - "the OTP is created and the requested value specified here (" + - str(mod_value) + - ") differs from what is set in the IPA server (" - + str(ipa_value) + ")") - module.fail_json(msg=fail_message) - - return modifications_valid - - -def ensure(module, client): - # dict to map from ansible parameter names to attribute names - # used by IPA (which are not so friendly). - ansible_to_ipa = {'uniqueid': 'ipatokenuniqueid', - 'newuniqueid': 'rename', - 'otptype': 'type', - 'secretkey': 'ipatokenotpkey', - 'description': 'description', - 'owner': 'ipatokenowner', - 'enabled': 'ipatokendisabled', - 'notbefore': 'ipatokennotbefore', - 'notafter': 'ipatokennotafter', - 'vendor': 'ipatokenvendor', - 'model': 'ipatokenmodel', - 'serial': 'ipatokenserial', - 'algorithm': 'ipatokenotpalgorithm', - 'digits': 'ipatokenotpdigits', - 'offset': 'ipatokentotpclockoffset', - 'interval': 'ipatokentotptimestep', - 'counter': 'ipatokenhotpcounter'} - - # Create inverse dictionary for mapping return values - ipa_to_ansible = {} - for (k, v) in ansible_to_ipa.items(): - ipa_to_ansible[v] = k - - unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm', - 'digits', 'offset', 'interval', 'counter'] - state = module.params['state'] - uniqueid = module.params['uniqueid'] - - module_otptoken = get_otptoken_dict(ansible_to_ipa=ansible_to_ipa, - uniqueid=module.params.get('uniqueid'), - newuniqueid=module.params.get('newuniqueid'), - otptype=module.params.get('otptype'), - secretkey=module.params.get('secretkey'), - description=module.params.get('description'), - owner=module.params.get('owner'), - enabled=module.params.get('enabled'), - notbefore=module.params.get('notbefore'), - notafter=module.params.get('notafter'), - vendor=module.params.get('vendor'), - model=module.params.get('model'), - serial=module.params.get('serial'), - algorithm=module.params.get('algorithm'), - digits=module.params.get('digits'), - offset=module.params.get('offset'), - interval=module.params.get('interval'), - counter=module.params.get('counter')) - - ipa_otptoken = client.otptoken_find(name=uniqueid) - - if ansible_to_ipa['newuniqueid'] in module_otptoken: - # Check to see if the new unique id is already in use - ipa_otptoken_new = client.otptoken_find(name=module_otptoken[ansible_to_ipa['newuniqueid']]) - if ipa_otptoken_new: - module.fail_json(msg=("Requested rename through newuniqueid to " + - module_otptoken[ansible_to_ipa['newuniqueid']] + - " failed because the new unique id is already in use")) - - changed = False - if state == 'present': - if not ipa_otptoken: - changed = True - if not module.check_mode: - # It would not make sense to have a rename after creation, so if the user - # specified a newuniqueid, just replace the uniqueid with the updated one - # before creation - if ansible_to_ipa['newuniqueid'] in module_otptoken: - module_otptoken[ansible_to_ipa['uniqueid']] =
module_otptoken[ansible_to_ipa['newuniqueid']] - uniqueid = module_otptoken[ansible_to_ipa['newuniqueid']] - module_otptoken.pop(ansible_to_ipa['newuniqueid']) - - # IPA wants the unique id in the first position and not as a key/value pair. - # Get rid of it from the otptoken dict and just specify it in the name field - # for otptoken_add. - if ansible_to_ipa['uniqueid'] in module_otptoken: - module_otptoken.pop(ansible_to_ipa['uniqueid']) - - module_otptoken['all'] = True - ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken) - else: - if not(validate_modifications(ansible_to_ipa, module, ipa_otptoken, - module_otptoken, unmodifiable_after_creation)): - module.fail_json(msg="Modifications requested in module are not valid") - - # IPA will reject 'modifications' that do not actually modify anything - # if any of the unmodifiable elements are specified. Explicitly - # get rid of them here. They were not different or else the - # we would have failed out in validate_modifications. - for x in unmodifiable_after_creation: - if ansible_to_ipa[x] in module_otptoken: - module_otptoken.pop(ansible_to_ipa[x]) - - diff = client.get_diff(ipa_data=ipa_otptoken, module_data=module_otptoken) - if len(diff) > 0: - changed = True - if not module.check_mode: - - # IPA wants the unique id in the first position and not as a key/value pair. - # Get rid of it from the otptoken dict and just specify it in the name field - # for otptoken_mod. - if ansible_to_ipa['uniqueid'] in module_otptoken: - module_otptoken.pop(ansible_to_ipa['uniqueid']) - - module_otptoken['all'] = True - ipa_otptoken = client.otptoken_mod(name=uniqueid, item=module_otptoken) - else: - if ipa_otptoken: - changed = True - if not module.check_mode: - client.otptoken_del(name=uniqueid) - - # Transform the output to use ansible keywords (not the IPA keywords) and - # sanitize any key values in the output. 
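# In outline, the sanitization below works like this (a sketch only, with a
# dummy secret; it mirrors the real calls that follow, and sanitize_keys is
# the helper imported above from ansible.module_utils.basic):
#
#     module.no_log_values = module.no_log_values.union({'c2VjcmV0'})
#     safe = sanitize_keys(obj=result, no_log_strings=module.no_log_values)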
- ipa_otptoken, sanitize_strings = transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible) - module.no_log_values = module.no_log_values.union(sanitize_strings) - sanitized_otptoken = sanitize_keys(obj=ipa_otptoken, no_log_strings=module.no_log_values) - return changed, sanitized_otptoken - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(uniqueid=dict(type='str', aliases=['name'], required=True), - newuniqueid=dict(type='str'), - otptype=dict(type='str', choices=['totp', 'hotp']), - secretkey=dict(type='str', no_log=True), - description=dict(type='str'), - owner=dict(type='str'), - enabled=dict(type='bool', default=True), - notbefore=dict(type='str'), - notafter=dict(type='str'), - vendor=dict(type='str'), - model=dict(type='str'), - serial=dict(type='str'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - algorithm=dict(type='str', choices=['sha1', 'sha256', 'sha384', 'sha512']), - digits=dict(type='int', choices=[6, 8]), - offset=dict(type='int'), - interval=dict(type='int'), - counter=dict(type='int')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = OTPTokenIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, otptoken = ensure(module, client) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, otptoken=otptoken) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_pwpolicy.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_pwpolicy.py deleted file mode 100644 index 0f9b141b..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_pwpolicy.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_pwpolicy -author: Adralioh (@adralioh) -short_description: Manage FreeIPA password policies -description: -- Add, modify, or delete a password policy using the IPA API. -version_added: 2.0.0 -options: - group: - description: - - Name of the group that the policy applies to. - - If omitted, the global policy is used. - aliases: ["name"] - type: str - state: - description: State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - maxpwdlife: - description: Maximum password lifetime (in days). - type: str - minpwdlife: - description: Minimum password lifetime (in hours). - type: str - historylength: - description: - - Number of previous passwords that are remembered. - - Users cannot reuse remembered passwords. - type: str - minclasses: - description: Minimum number of character classes. - type: str - minlength: - description: Minimum password length. - type: str - priority: - description: - - Priority of the policy. - - High number means lower priority. - - Required when C(cn) is not the global policy. - type: str - maxfailcount: - description: Maximum number of consecutive failures before lockout. 
- type: str - failinterval: - description: Period (in seconds) after which the number of failed login attempts is reset. - type: str - lockouttime: - description: Period (in seconds) for which users are locked out. - type: str -extends_documentation_fragment: -- community.general.ipa.documentation -notes: -- Supports C(check_mode). -''' - -EXAMPLES = r''' -- name: Modify the global password policy - community.general.ipa_pwpolicy: - maxpwdlife: '90' - minpwdlife: '1' - historylength: '8' - minclasses: '3' - minlength: '16' - maxfailcount: '6' - failinterval: '60' - lockouttime: '600' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure the password policy for the group admins is present - community.general.ipa_pwpolicy: - group: admins - state: present - maxpwdlife: '60' - minpwdlife: '24' - historylength: '16' - minclasses: '4' - priority: '10' - maxfailcount: '4' - failinterval: '600' - lockouttime: '1200' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that the group sysops does not have a unique password policy - community.general.ipa_pwpolicy: - group: sysops - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -pwpolicy: - description: Password policy as returned by IPA API. - returned: always - type: dict - sample: - cn: ['admins'] - cospriority: ['10'] - dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com' - krbmaxpwdlife: ['60'] - krbminpwdlife: ['24'] - krbpwdfailurecountinterval: ['600'] - krbpwdhistorylength: ['16'] - krbpwdlockoutduration: ['1200'] - krbpwdmaxfailure: ['4'] - krbpwdmindiffchars: ['4'] - objectclass: ['top', 'nscontainer', 'krbpwdpolicy'] -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class PwPolicyIPAClient(IPAClient): - '''The global policy will be selected when `name` is `None`''' - def __init__(self, module, host, port, protocol): - super(PwPolicyIPAClient, self).__init__(module, host, port, protocol) - - def pwpolicy_find(self, name): - if name is None: - # Manually set the cn to the global policy because pwpolicy_find will return a random - # different policy if cn is `None` - name = 'global_policy' - return self._post_json(method='pwpolicy_find', name=None, item={'all': True, 'cn': name}) - - def pwpolicy_add(self, name, item): - return self._post_json(method='pwpolicy_add', name=name, item=item) - - def pwpolicy_mod(self, name, item): - return self._post_json(method='pwpolicy_mod', name=name, item=item) - - def pwpolicy_del(self, name): - return self._post_json(method='pwpolicy_del', name=name) - - -def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None, - minlength=None, priority=None, maxfailcount=None, failinterval=None, - lockouttime=None): - pwpolicy = {} - if maxpwdlife is not None: - pwpolicy['krbmaxpwdlife'] = maxpwdlife - if minpwdlife is not None: - pwpolicy['krbminpwdlife'] = minpwdlife - if historylength is not None: - pwpolicy['krbpwdhistorylength'] = historylength - if minclasses is not None: - pwpolicy['krbpwdmindiffchars'] = minclasses - if minlength is not None: - pwpolicy['krbpwdminlength'] = minlength - if priority is not None: - pwpolicy['cospriority'] = priority - if maxfailcount is not None: - pwpolicy['krbpwdmaxfailure'] = maxfailcount - if failinterval is 
not None: - pwpolicy['krbpwdfailurecountinterval'] = failinterval - if lockouttime is not None: - pwpolicy['krbpwdlockoutduration'] = lockouttime - - return pwpolicy - - -def get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy): - return client.get_diff(ipa_data=ipa_pwpolicy, module_data=module_pwpolicy) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['group'] - - module_pwpolicy = get_pwpolicy_dict(maxpwdlife=module.params.get('maxpwdlife'), - minpwdlife=module.params.get('minpwdlife'), - historylength=module.params.get('historylength'), - minclasses=module.params.get('minclasses'), - minlength=module.params.get('minlength'), - priority=module.params.get('priority'), - maxfailcount=module.params.get('maxfailcount'), - failinterval=module.params.get('failinterval'), - lockouttime=module.params.get('lockouttime')) - - ipa_pwpolicy = client.pwpolicy_find(name=name) - - changed = False - if state == 'present': - if not ipa_pwpolicy: - changed = True - if not module.check_mode: - ipa_pwpolicy = client.pwpolicy_add(name=name, item=module_pwpolicy) - else: - diff = get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy) - if len(diff) > 0: - changed = True - if not module.check_mode: - ipa_pwpolicy = client.pwpolicy_mod(name=name, item=module_pwpolicy) - else: - if ipa_pwpolicy: - changed = True - if not module.check_mode: - client.pwpolicy_del(name=name) - - return changed, ipa_pwpolicy - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(group=dict(type='str', aliases=['name']), - state=dict(type='str', default='present', choices=['present', 'absent']), - maxpwdlife=dict(type='str'), - minpwdlife=dict(type='str'), - historylength=dict(type='str'), - minclasses=dict(type='str'), - minlength=dict(type='str'), - priority=dict(type='str'), - maxfailcount=dict(type='str'), - failinterval=dict(type='str'), - lockouttime=dict(type='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = PwPolicyIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, pwpolicy = ensure(module, client) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, pwpolicy=pwpolicy) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py deleted file mode 100644 index c602614e..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py +++ /dev/null @@ -1,302 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_role -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA role -description: -- Add, modify and delete a role within FreeIPA server using FreeIPA API. -options: - cn: - description: - - Role name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ['name'] - type: str - description: - description: - - A description of this role-group. 
- type: str - group: - description: - - List of group names to assign to this role. - - If an empty list is passed all assigned groups will be unassigned from the role. - - If option is omitted groups will not be checked or changed. - - If option is passed all assigned groups that are not passed will be unassigned from the role. - type: list - elements: str - host: - description: - - List of host names to assign. - - If an empty list is passed all assigned hosts will be unassigned from the role. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the role. - type: list - elements: str - hostgroup: - description: - - List of host group names to assign. - - If an empty list is passed all assigned host groups will be removed from the role. - - If option is omitted host groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the role. - type: list - elements: str - privilege: - description: - - List of privileges granted to the role. - - If an empty list is passed all assigned privileges will be removed. - - If option is omitted privileges will not be checked or changed. - - If option is passed all assigned privileges that are not passed will be removed. - type: list - elements: str - service: - description: - - List of service names to assign. - - If an empty list is passed all assigned services will be removed from the role. - - If option is omitted services will not be checked or changed. - - If option is passed all assigned services that are not passed will be removed from the role. - type: list - elements: str - state: - description: State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - user: - description: - - List of user names to assign. - - If an empty list is passed all assigned users will be removed from the role. - - If option is omitted users will not be checked or changed. - type: list - elements: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure role is present - community.general.ipa_role: - name: dba - description: Database Administrators - state: present - user: - - pinky - - brain - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure role with certain details - community.general.ipa_role: - name: another-role - description: Just another role - group: - - editors - host: - - host01.example.com - hostgroup: - - hostgroup01 - privilege: - - Group Administrators - - User Administrators - service: - - service01 - -- name: Ensure role is absent - community.general.ipa_role: - name: dba - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -role: - description: Role as returned by IPA API.
- returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class RoleIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(RoleIPAClient, self).__init__(module, host, port, protocol) - - def role_find(self, name): - return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name}) - - def role_add(self, name, item): - return self._post_json(method='role_add', name=name, item=item) - - def role_mod(self, name, item): - return self._post_json(method='role_mod', name=name, item=item) - - def role_del(self, name): - return self._post_json(method='role_del', name=name) - - def role_add_member(self, name, item): - return self._post_json(method='role_add_member', name=name, item=item) - - def role_add_group(self, name, item): - return self.role_add_member(name=name, item={'group': item}) - - def role_add_host(self, name, item): - return self.role_add_member(name=name, item={'host': item}) - - def role_add_hostgroup(self, name, item): - return self.role_add_member(name=name, item={'hostgroup': item}) - - def role_add_service(self, name, item): - return self.role_add_member(name=name, item={'service': item}) - - def role_add_user(self, name, item): - return self.role_add_member(name=name, item={'user': item}) - - def role_remove_member(self, name, item): - return self._post_json(method='role_remove_member', name=name, item=item) - - def role_remove_group(self, name, item): - return self.role_remove_member(name=name, item={'group': item}) - - def role_remove_host(self, name, item): - return self.role_remove_member(name=name, item={'host': item}) - - def role_remove_hostgroup(self, name, item): - return self.role_remove_member(name=name, item={'hostgroup': item}) - - def role_remove_service(self, name, item): - return self.role_remove_member(name=name, item={'service': item}) - - def role_remove_user(self, name, item): - return self.role_remove_member(name=name, item={'user': item}) - - def role_add_privilege(self, name, item): - return self._post_json(method='role_add_privilege', name=name, item={'privilege': item}) - - def role_remove_privilege(self, name, item): - return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item}) - - -def get_role_dict(description=None): - data = {} - if description is not None: - data['description'] = description - return data - - -def get_role_diff(client, ipa_role, module_role): - return client.get_diff(ipa_data=ipa_role, module_data=module_role) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - group = module.params['group'] - host = module.params['host'] - hostgroup = module.params['hostgroup'] - privilege = module.params['privilege'] - service = module.params['service'] - user = module.params['user'] - - module_role = get_role_dict(description=module.params['description']) - ipa_role = client.role_find(name=name) - - changed = False - if state == 'present': - if not ipa_role: - changed = True - if not module.check_mode: - ipa_role = client.role_add(name=name, item=module_role) - else: - diff = get_role_diff(client, ipa_role, module_role) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_role.get(key) - client.role_mod(name=name, item=data) - - if 
group is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group, - client.role_add_group, - client.role_remove_group) or changed - if host is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host, - client.role_add_host, - client.role_remove_host) or changed - - if hostgroup is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup, - client.role_add_hostgroup, - client.role_remove_hostgroup) or changed - - if privilege is not None: - changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege, - client.role_add_privilege, - client.role_remove_privilege) or changed - if service is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service, - client.role_add_service, - client.role_remove_service) or changed - if user is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user, - client.role_add_user, - client.role_remove_user) or changed - - else: - if ipa_role: - changed = True - if not module.check_mode: - client.role_del(name) - - return changed, client.role_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - group=dict(type='list', elements='str'), - host=dict(type='list', elements='str'), - hostgroup=dict(type='list', elements='str'), - privilege=dict(type='list', elements='str'), - service=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - user=dict(type='list', elements='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = RoleIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, role = ensure(module, client) - module.exit_json(changed=changed, role=role) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py deleted file mode 100644 index f85b80d4..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_service -author: Cédric Parent (@cprh) -short_description: Manage FreeIPA service -description: -- Add and delete an IPA service using IPA API. -options: - krbcanonicalname: - description: - - Principal of the service. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str - hosts: - description: - - Defines the list of 'ManagedBy' hosts. - required: false - type: list - elements: str - force: - description: - - Force principal name even if host is not in DNS. - required: false - type: bool - state: - description: State to ensure. 
- required: false - default: present - choices: ["absent", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure service is present - community.general.ipa_service: - name: http/host01.example.com - state: present - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure service is absent - community.general.ipa_service: - name: http/host01.example.com - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Change the list of managing hosts - community.general.ipa_service: - name: http/host01.example.com - hosts: - - host01.example.com - - host02.example.com - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -service: - description: Service as returned by IPA API. - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class ServiceIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(ServiceIPAClient, self).__init__(module, host, port, protocol) - - def service_find(self, name): - return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name}) - - def service_add(self, name, service): - return self._post_json(method='service_add', name=name, item=service) - - def service_mod(self, name, service): - return self._post_json(method='service_mod', name=name, item=service) - - def service_del(self, name): - return self._post_json(method='service_del', name=name) - - def service_disable(self, name): - return self._post_json(method='service_disable', name=name) - - def service_add_host(self, name, item): - return self._post_json(method='service_add_host', name=name, item={'host': item}) - - def service_remove_host(self, name, item): - return self._post_json(method='service_remove_host', name=name, item={'host': item}) - - -def get_service_dict(force=None, krbcanonicalname=None): - data = {} - if force is not None: - data['force'] = force - if krbcanonicalname is not None: - data['krbcanonicalname'] = krbcanonicalname - return data - - -def get_service_diff(client, ipa_host, module_service): - non_updateable_keys = ['force', 'krbcanonicalname'] - for key in non_updateable_keys: - if key in module_service: - del module_service[key] - - return client.get_diff(ipa_data=ipa_host, module_data=module_service) - - -def ensure(module, client): - name = module.params['krbcanonicalname'] - state = module.params['state'] - hosts = module.params['hosts'] - - ipa_service = client.service_find(name=name) - module_service = get_service_dict(force=module.params['force']) - changed = False - if state == 'present': - if not ipa_service: - changed = True - if not module.check_mode: - client.service_add(name=name, service=module_service) - else: - diff = get_service_diff(client, ipa_service, module_service) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_service.get(key) - client.service_mod(name=name, service=data) - if hosts is not None: - if 'managedby_host' in ipa_service: - for host in ipa_service['managedby_host']: - if host not in hosts: - if not module.check_mode: - client.service_remove_host(name=name, item=host) - changed = True - for host
in hosts: - if host not in ipa_service['managedby_host']: - if not module.check_mode: - client.service_add_host(name=name, item=host) - changed = True - else: - for host in hosts: - if not module.check_mode: - client.service_add_host(name=name, item=host) - changed = True - - else: - if ipa_service: - changed = True - if not module.check_mode: - client.service_del(name=name) - - return changed, client.service_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update( - krbcanonicalname=dict(type='str', required=True, aliases=['name']), - force=dict(type='bool', required=False), - hosts=dict(type='list', required=False, elements='str'), - state=dict(type='str', required=False, default='present', - choices=['present', 'absent'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = ServiceIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, host = ensure(module, client) - module.exit_json(changed=changed, host=host) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py deleted file mode 100644 index 387d63c5..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_subca -author: Abhijeet Kasurde (@Akasurde) -short_description: Manage FreeIPA Lightweight Sub Certificate Authorities. -description: -- Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authorities using IPA API. -options: - subca_name: - description: - - The Sub Certificate Authority name which needs to be managed. - required: true - aliases: ["name"] - type: str - subca_subject: - description: - - The Sub Certificate Authority's Subject. e.g., 'CN=SampleSubCA1,O=testrelm.test'. - required: true - type: str - subca_desc: - description: - - The Sub Certificate Authority's description. - type: str - state: - description: - - State to ensure. - - State 'disable' and 'enable' is available for FreeIPA 4.4.2 version and onwards. - required: false - default: present - choices: ["absent", "disabled", "enabled", "present"] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = ''' -- name: Ensure IPA Sub CA is present - community.general.ipa_subca: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: present - subca_name: AnsibleSubCA1 - subca_subject: 'CN=AnsibleSubCA1,O=example.com' - subca_desc: Ansible Sub CA - -- name: Ensure that IPA Sub CA is removed - community.general.ipa_subca: - ipa_host: spider.example.com - ipa_pass: Passw0rd! - state: absent - subca_name: AnsibleSubCA1 - -- name: Ensure that IPA Sub CA is disabled - community.general.ipa_subca: - ipa_host: spider.example.com - ipa_pass: Passw0rd! 
- state: disable - subca_name: AnsibleSubCA1 -''' - -RETURN = r''' -subca: - description: IPA Sub CA record as returned by IPA API. - returned: always - type: dict -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -class SubCAIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(SubCAIPAClient, self).__init__(module, host, port, protocol) - - def subca_find(self, subca_name): - return self._post_json(method='ca_find', name=subca_name, item=None) - - def subca_add(self, subca_name=None, subject_dn=None, details=None): - item = dict(ipacasubjectdn=subject_dn) - subca_desc = details.get('description', None) - if subca_desc is not None: - item.update(description=subca_desc) - return self._post_json(method='ca_add', name=subca_name, item=item) - - def subca_mod(self, subca_name=None, diff=None, details=None): - item = get_subca_dict(details) - for change in diff: - update_detail = dict() - if item[change] is not None: - update_detail.update(setattr="{0}={1}".format(change, item[change])) - self._post_json(method='ca_mod', name=subca_name, item=update_detail) - - def subca_del(self, subca_name=None): - return self._post_json(method='ca_del', name=subca_name) - - def subca_disable(self, subca_name=None): - return self._post_json(method='ca_disable', name=subca_name) - - def subca_enable(self, subca_name=None): - return self._post_json(method='ca_enable', name=subca_name) - - -def get_subca_dict(details=None): - module_subca = dict() - if details['description'] is not None: - module_subca['description'] = details['description'] - if details['subca_subject'] is not None: - module_subca['ipacasubjectdn'] = details['subca_subject'] - return module_subca - - -def get_subca_diff(client, ipa_subca, module_subca): - details = get_subca_dict(module_subca) - return client.get_diff(ipa_data=ipa_subca, module_data=details) - - -def ensure(module, client): - subca_name = module.params['subca_name'] - subca_subject_dn = module.params['subca_subject'] - subca_desc = module.params['subca_desc'] - - state = module.params['state'] - - ipa_subca = client.subca_find(subca_name) - module_subca = dict(description=subca_desc, - subca_subject=subca_subject_dn) - - changed = False - if state == 'present': - if not ipa_subca: - changed = True - if not module.check_mode: - client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca) - else: - diff = get_subca_diff(client, ipa_subca, module_subca) - # IPA does not allow to modify Sub CA's subject DN - # So skip it for now. - if 'ipacasubjectdn' in diff: - diff.remove('ipacasubjectdn') - del module_subca['subca_subject'] - - if len(diff) > 0: - changed = True - if not module.check_mode: - client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca) - elif state == 'absent': - if ipa_subca: - changed = True - if not module.check_mode: - client.subca_del(subca_name=subca_name) - elif state == 'disable': - ipa_version = client.get_ipa_version() - if LooseVersion(ipa_version) < LooseVersion('4.4.2'): - module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. 
Please upgrade to " - "version greater than 4.4.2") - if ipa_subca: - changed = True - if not module.check_mode: - client.subca_disable(subca_name=subca_name) - elif state == 'enable': - ipa_version = client.get_ipa_version() - if LooseVersion(ipa_version) < LooseVersion('4.4.2'): - module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to " - "version greater than 4.4.2") - if ipa_subca: - changed = True - if not module.check_mode: - client.subca_enable(subca_name=subca_name) - - return changed, client.subca_find(subca_name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']), - subca_subject=dict(type='str', required=True), - subca_desc=dict(type='str'), - state=dict(type='str', default='present', - choices=['present', 'absent', 'enabled', 'disabled']),) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True,) - - client = SubCAIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, record = ensure(module, client) - module.exit_json(changed=changed, record=record) - except Exception as exc: - module.fail_json(msg=to_native(exc)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py deleted file mode 100644 index d75aff44..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_sudocmd -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA sudo command -description: -- Add, modify or delete sudo command within FreeIPA server using FreeIPA API. -options: - sudocmd: - description: - - Sudo command. - aliases: ['name'] - required: true - type: str - description: - description: - - A description of this command. - type: str - state: - description: State to ensure. 
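The state=disable/enable branches of ipa_subca above both gate on the server version; note that their fail_json messages contain a %s placeholder without a corresponding argument, so the placeholder is emitted verbatim. A minimal standalone sketch of the version gate (distutils' LooseVersion is used here only to keep the sketch self-contained; the module imports LooseVersion from community.general's module_utils.version):

    from distutils.version import LooseVersion  # deprecated upstream, fine for a sketch

    MIN_SUBCA_TOGGLE = '4.4.2'  # hypothetical constant; the module inlines the literal

    def supports_subca_toggle(ipa_version):
        # ca_disable/ca_enable are only available from FreeIPA 4.4.2 onwards.
        return LooseVersion(ipa_version) >= LooseVersion(MIN_SUBCA_TOGGLE)

    assert supports_subca_toggle('4.5.0')
    assert not supports_subca_toggle('4.4.1')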
- default: present - choices: ['absent', 'disabled', 'enabled', 'present'] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure sudo command exists - community.general.ipa_sudocmd: - name: su - description: Allow to run su via sudo - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure sudo command does not exist - community.general.ipa_sudocmd: - name: su - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -sudocmd: - description: Sudo command as return from IPA API - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class SudoCmdIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(SudoCmdIPAClient, self).__init__(module, host, port, protocol) - - def sudocmd_find(self, name): - return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name}) - - def sudocmd_add(self, name, item): - return self._post_json(method='sudocmd_add', name=name, item=item) - - def sudocmd_mod(self, name, item): - return self._post_json(method='sudocmd_mod', name=name, item=item) - - def sudocmd_del(self, name): - return self._post_json(method='sudocmd_del', name=name) - - -def get_sudocmd_dict(description=None): - data = {} - if description is not None: - data['description'] = description - return data - - -def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd): - return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd) - - -def ensure(module, client): - name = module.params['sudocmd'] - state = module.params['state'] - - module_sudocmd = get_sudocmd_dict(description=module.params['description']) - ipa_sudocmd = client.sudocmd_find(name=name) - - changed = False - if state == 'present': - if not ipa_sudocmd: - changed = True - if not module.check_mode: - client.sudocmd_add(name=name, item=module_sudocmd) - else: - diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_sudocmd.get(key) - client.sudocmd_mod(name=name, item=data) - else: - if ipa_sudocmd: - changed = True - if not module.check_mode: - client.sudocmd_del(name=name) - - return changed, client.sudocmd_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(description=dict(type='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - sudocmd=dict(type='str', required=True, aliases=['name'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = SudoCmdIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, sudocmd = ensure(module, client) - module.exit_json(changed=changed, sudocmd=sudocmd) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py 
b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py deleted file mode 100644 index 65fdd4f7..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_sudocmdgroup -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA sudo command group -description: -- Add, modify or delete sudo command group within IPA server using IPA API. -options: - cn: - description: - - Sudo Command Group. - aliases: ['name'] - required: true - type: str - description: - description: - - Group description. - type: str - state: - description: State to ensure. - default: present - choices: ['absent', 'disabled', 'enabled', 'present'] - type: str - sudocmd: - description: - - List of sudo commands to assign to the group. - - If an empty list is passed all assigned commands will be removed from the group. - - If option is omitted sudo commands will not be checked or changed. - type: list - elements: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure sudo command group exists - community.general.ipa_sudocmdgroup: - name: group01 - description: Group of important commands - sudocmd: - - su - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure sudo command group does not exist - community.general.ipa_sudocmdgroup: - name: group01 - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -sudocmdgroup: - description: Sudo command group as returned by IPA API - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class SudoCmdGroupIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol) - - def sudocmdgroup_find(self, name): - return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name}) - - def sudocmdgroup_add(self, name, item): - return self._post_json(method='sudocmdgroup_add', name=name, item=item) - - def sudocmdgroup_mod(self, name, item): - return self._post_json(method='sudocmdgroup_mod', name=name, item=item) - - def sudocmdgroup_del(self, name): - return self._post_json(method='sudocmdgroup_del', name=name) - - def sudocmdgroup_add_member(self, name, item): - return self._post_json(method='sudocmdgroup_add_member', name=name, item=item) - - def sudocmdgroup_add_member_sudocmd(self, name, item): - return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item}) - - def sudocmdgroup_remove_member(self, name, item): - return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item) - - def sudocmdgroup_remove_member_sudocmd(self, name, item): - return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item}) - - -def get_sudocmdgroup_dict(description=None): - data = {} - if description is not None: - data['description'] = description - return data - - -def 
get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup): - return client.get_diff(ipa_data=ipa_sudocmdgroup, module_data=module_sudocmdgroup) - - -def ensure(module, client): - name = module.params['cn'] - state = module.params['state'] - sudocmd = module.params['sudocmd'] - - module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description']) - ipa_sudocmdgroup = client.sudocmdgroup_find(name=name) - - changed = False - if state == 'present': - if not ipa_sudocmdgroup: - changed = True - if not module.check_mode: - ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup) - else: - diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in diff: - data[key] = module_sudocmdgroup.get(key) - client.sudocmdgroup_mod(name=name, item=data) - - if sudocmd is not None: - changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd, - client.sudocmdgroup_add_member_sudocmd, - client.sudocmdgroup_remove_member_sudocmd) - else: - if ipa_sudocmdgroup: - changed = True - if not module.check_mode: - client.sudocmdgroup_del(name=name) - - return changed, client.sudocmdgroup_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - sudocmd=dict(type='list', elements='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = SudoCmdGroupIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, sudocmdgroup = ensure(module, client) - module.exit_json(changed=changed, sudorule=sudocmdgroup) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py deleted file mode 100644 index 2054599f..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py +++ /dev/null @@ -1,464 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_sudorule -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA sudo rule -description: -- Add, modify or delete sudo rule within IPA server using IPA API. -options: - cn: - description: - - Canonical name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ['name'] - type: str - cmdcategory: - description: - - Command category the rule applies to. - choices: ['all'] - type: str - cmd: - description: - - List of commands assigned to the rule. - - If an empty list is passed all commands will be removed from the rule. - - If option is omitted commands will not be checked or changed. 
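ipa_sudocmdgroup above delegates membership changes to client.modify_if_diff; note also that its main() exits with sudorule=sudocmdgroup, so the result key disagrees with the documented sudocmdgroup return value. A sketch of the semantics modify_if_diff plausibly has, inferred from its call sites here (name and signature are assumptions, not the actual implementation):

    def modify_if_diff(name, ipa_list, module_list, add_method, remove_method, check_mode=False):
        # Set differences drive the add/remove callbacks.
        to_remove = sorted(set(ipa_list) - set(module_list))
        to_add = sorted(set(module_list) - set(ipa_list))
        if not check_mode:
            if to_remove:
                remove_method(name=name, item=to_remove)
            if to_add:
                add_method(name=name, item=to_add)
        return bool(to_remove or to_add)  # the 'changed' flag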
- type: list - elements: str - cmdgroup: - description: - - List of command groups assigned to the rule. - - If an empty list is passed all command groups will be removed from the rule. - - If option is omitted command groups will not be checked or changed. - type: list - elements: str - version_added: 2.0.0 - description: - description: - - Description of the sudo rule. - type: str - host: - description: - - List of hosts assigned to the rule. - - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. - - Option C(hostcategory) must be omitted to assign hosts. - type: list - elements: str - hostcategory: - description: - - Host category the rule applies to. - - If 'all' is passed one must omit C(host) and C(hostgroup). - - Option C(host) and C(hostgroup) must be omitted to assign 'all'. - choices: ['all'] - type: str - hostgroup: - description: - - List of host groups assigned to the rule. - - If an empty list is passed all host groups will be removed from the rule. - - If option is omitted host groups will not be checked or changed. - - Option C(hostcategory) must be omitted to assign host groups. - type: list - elements: str - runasextusers: - description: - - List of external RunAs users - type: list - elements: str - version_added: 2.3.0 - runasusercategory: - description: - - RunAs User category the rule applies to. - choices: ['all'] - type: str - runasgroupcategory: - description: - - RunAs Group category the rule applies to. - choices: ['all'] - type: str - sudoopt: - description: - - List of options to add to the sudo rule. - type: list - elements: str - user: - description: - - List of users assigned to the rule. - - If an empty list is passed all users will be removed from the rule. - - If option is omitted users will not be checked or changed. - type: list - elements: str - usercategory: - description: - - User category the rule applies to. - choices: ['all'] - type: str - usergroup: - description: - - List of user groups assigned to the rule. - - If an empty list is passed all user groups will be removed from the rule. - - If option is omitted user groups will not be checked or changed. - type: list - elements: str - state: - description: State to ensure. - default: present - choices: ['absent', 'disabled', 'enabled', 'present'] - type: str -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure sudo rule is present that's allows all every body to execute any command on any host without being asked for a password. - community.general.ipa_sudorule: - name: sudo_all_nopasswd - cmdcategory: all - description: Allow to run every command with sudo without password - hostcategory: all - sudoopt: - - '!authenticate' - usercategory: all - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com. - community.general.ipa_sudorule: - name: sudo_dev_dbserver - description: Allow developers to run every command with sudo on all database server - cmdcategory: all - host: - - db01.example.com - hostgroup: - - db-server - sudoopt: - - '!authenticate' - usergroup: - - developers - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure user group operations can run any commands that is part of operations-cmdgroup on any host as user root. 
- community.general.ipa_sudorule: - name: sudo_operations_all - description: Allow operators to run any commands that is part of operations-cmdgroup on any host as user root. - cmdgroup: - - operations-cmdgroup - hostcategory: all - runasextusers: - - root - sudoopt: - - '!authenticate' - usergroup: - - operators - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -sudorule: - description: Sudorule as returned by IPA - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class SudoRuleIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(SudoRuleIPAClient, self).__init__(module, host, port, protocol) - - def sudorule_find(self, name): - return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name}) - - def sudorule_add(self, name, item): - return self._post_json(method='sudorule_add', name=name, item=item) - - def sudorule_add_runasuser(self, name, item): - return self._post_json(method='sudorule_add_runasuser', name=name, item={'user': item}) - - def sudorule_remove_runasuser(self, name, item): - return self._post_json(method='sudorule_remove_runasuser', name=name, item={'user': item}) - - def sudorule_mod(self, name, item): - return self._post_json(method='sudorule_mod', name=name, item=item) - - def sudorule_del(self, name): - return self._post_json(method='sudorule_del', name=name) - - def sudorule_add_option(self, name, item): - return self._post_json(method='sudorule_add_option', name=name, item=item) - - def sudorule_add_option_ipasudoopt(self, name, item): - return self.sudorule_add_option(name=name, item={'ipasudoopt': item}) - - def sudorule_remove_option(self, name, item): - return self._post_json(method='sudorule_remove_option', name=name, item=item) - - def sudorule_remove_option_ipasudoopt(self, name, item): - return self.sudorule_remove_option(name=name, item={'ipasudoopt': item}) - - def sudorule_add_host(self, name, item): - return self._post_json(method='sudorule_add_host', name=name, item=item) - - def sudorule_add_host_host(self, name, item): - return self.sudorule_add_host(name=name, item={'host': item}) - - def sudorule_add_host_hostgroup(self, name, item): - return self.sudorule_add_host(name=name, item={'hostgroup': item}) - - def sudorule_remove_host(self, name, item): - return self._post_json(method='sudorule_remove_host', name=name, item=item) - - def sudorule_remove_host_host(self, name, item): - return self.sudorule_remove_host(name=name, item={'host': item}) - - def sudorule_remove_host_hostgroup(self, name, item): - return self.sudorule_remove_host(name=name, item={'hostgroup': item}) - - def sudorule_add_allow_command(self, name, item): - return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) - - def sudorule_add_allow_command_group(self, name, item): - return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item}) - - def sudorule_remove_allow_command(self, name, item): - return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) - - def sudorule_add_user(self, name, item): - return self._post_json(method='sudorule_add_user', name=name, item=item) - - def sudorule_add_user_user(self, name, item): - return 
self.sudorule_add_user(name=name, item={'user': item}) - - def sudorule_add_user_group(self, name, item): - return self.sudorule_add_user(name=name, item={'group': item}) - - def sudorule_remove_user(self, name, item): - return self._post_json(method='sudorule_remove_user', name=name, item=item) - - def sudorule_remove_user_user(self, name, item): - return self.sudorule_remove_user(name=name, item={'user': item}) - - def sudorule_remove_user_group(self, name, item): - return self.sudorule_remove_user(name=name, item={'group': item}) - - -def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None, - runasgroupcategory=None, runasusercategory=None): - data = {} - if cmdcategory is not None: - data['cmdcategory'] = cmdcategory - if description is not None: - data['description'] = description - if hostcategory is not None: - data['hostcategory'] = hostcategory - if ipaenabledflag is not None: - data['ipaenabledflag'] = ipaenabledflag - if usercategory is not None: - data['usercategory'] = usercategory - if runasusercategory is not None: - data['ipasudorunasusercategory'] = runasusercategory - if runasgroupcategory is not None: - data['ipasudorunasgroupcategory'] = runasgroupcategory - return data - - -def category_changed(module, client, category_name, ipa_sudorule): - if ipa_sudorule.get(category_name, None) == ['all']: - if not module.check_mode: - # cn is returned as list even with only a single value. - client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None}) - return True - return False - - -def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - cmd = module.params['cmd'] - cmdgroup = module.params['cmdgroup'] - cmdcategory = module.params['cmdcategory'] - host = module.params['host'] - hostcategory = module.params['hostcategory'] - hostgroup = module.params['hostgroup'] - runasusercategory = module.params['runasusercategory'] - runasgroupcategory = module.params['runasgroupcategory'] - runasextusers = module.params['runasextusers'] - - if state in ['present', 'enabled']: - ipaenabledflag = 'TRUE' - else: - ipaenabledflag = 'FALSE' - - sudoopt = module.params['sudoopt'] - user = module.params['user'] - usercategory = module.params['usercategory'] - usergroup = module.params['usergroup'] - - module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory, - description=module.params['description'], - hostcategory=hostcategory, - ipaenabledflag=ipaenabledflag, - usercategory=usercategory, - runasusercategory=runasusercategory, - runasgroupcategory=runasgroupcategory) - ipa_sudorule = client.sudorule_find(name=name) - - changed = False - if state in ['present', 'disabled', 'enabled']: - if not ipa_sudorule: - changed = True - if not module.check_mode: - ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule) - else: - diff = client.get_diff(ipa_sudorule, module_sudorule) - if len(diff) > 0: - changed = True - if not module.check_mode: - if 'hostcategory' in diff: - if ipa_sudorule.get('memberhost_host', None) is not None: - client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host')) - if ipa_sudorule.get('memberhost_hostgroup', None) is not None: - client.sudorule_remove_host_hostgroup(name=name, - item=ipa_sudorule.get('memberhost_hostgroup')) - - client.sudorule_mod(name=name, item=module_sudorule) - - if cmd is not None: - changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed - if not module.check_mode: - 
client.sudorule_add_allow_command(name=name, item=cmd) - - if cmdgroup is not None: - changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed - if not module.check_mode: - client.sudorule_add_allow_command_group(name=name, item=cmdgroup) - - if runasusercategory is not None: - changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed - - if runasgroupcategory is not None: - changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed - - if host is not None: - changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host, - client.sudorule_add_host_host, - client.sudorule_remove_host_host) or changed - - if hostgroup is not None: - changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup, - client.sudorule_add_host_hostgroup, - client.sudorule_remove_host_hostgroup) or changed - if sudoopt is not None: - # client.modify_if_diff does not work as each option must be removed/added by its own - ipa_list = ipa_sudorule.get('ipasudoopt', []) - module_list = sudoopt - diff = list(set(ipa_list) - set(module_list)) - if len(diff) > 0: - changed = True - if not module.check_mode: - for item in diff: - client.sudorule_remove_option_ipasudoopt(name, item) - diff = list(set(module_list) - set(ipa_list)) - if len(diff) > 0: - changed = True - if not module.check_mode: - for item in diff: - client.sudorule_add_option_ipasudoopt(name, item) - - if runasextusers is not None: - ipa_sudorule_run_as_user = ipa_sudorule.get('ipasudorunasextuser', []) - diff = list(set(ipa_sudorule_run_as_user) - set(runasextusers)) - if len(diff) > 0: - changed = True - if not module.check_mode: - for item in diff: - client.sudorule_remove_runasuser(name=name, item=item) - diff = list(set(runasextusers) - set(ipa_sudorule_run_as_user)) - if len(diff) > 0: - changed = True - if not module.check_mode: - for item in diff: - client.sudorule_add_runasuser(name=name, item=item) - - if user is not None: - changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user, - client.sudorule_add_user_user, - client.sudorule_remove_user_user) or changed - if usergroup is not None: - changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup, - client.sudorule_add_user_group, - client.sudorule_remove_user_group) or changed - else: - if ipa_sudorule: - changed = True - if not module.check_mode: - client.sudorule_del(name) - - return changed, client.sudorule_find(name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cmd=dict(type='list', elements='str'), - cmdgroup=dict(type='list', elements='str'), - cmdcategory=dict(type='str', choices=['all']), - cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - host=dict(type='list', elements='str'), - hostcategory=dict(type='str', choices=['all']), - hostgroup=dict(type='list', elements='str'), - runasusercategory=dict(type='str', choices=['all']), - runasgroupcategory=dict(type='str', choices=['all']), - sudoopt=dict(type='list', elements='str'), - state=dict(type='str', default='present', 
choices=['present', 'absent', 'enabled', 'disabled']), - user=dict(type='list', elements='str'), - usercategory=dict(type='str', choices=['all']), - usergroup=dict(type='list', elements='str'), - runasextusers=dict(type='list', elements='str')) - module = AnsibleModule(argument_spec=argument_spec, - mutually_exclusive=[['cmdcategory', 'cmd'], - ['cmdcategory', 'cmdgroup'], - ['hostcategory', 'host'], - ['hostcategory', 'hostgroup'], - ['usercategory', 'user'], - ['usercategory', 'usergroup']], - supports_check_mode=True) - - client = SudoRuleIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, sudorule = ensure(module, client) - module.exit_json(changed=changed, sudorule=sudorule) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py deleted file mode 100644 index 8a7b3abe..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py +++ /dev/null @@ -1,397 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_user -author: Thomas Krahn (@Nosmoht) -short_description: Manage FreeIPA users -description: -- Add, modify and delete user within IPA server. -options: - displayname: - description: Display name. - type: str - update_password: - description: - - Set password for a user. - type: str - default: 'always' - choices: [ always, on_create ] - givenname: - description: First name. - type: str - krbpasswordexpiration: - description: - - Date at which the user password will expire. - - In the format YYYYMMddHHmmss. - - e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22. - type: str - loginshell: - description: Login shell. - type: str - mail: - description: - - List of mail addresses assigned to the user. - - If an empty list is passed all assigned email addresses will be deleted. - - If None is passed email addresses will not be checked or changed. - type: list - elements: str - password: - description: - - Password for a user. - - Will not be set for an existing user unless I(update_password=always), which is the default. - type: str - sn: - description: Surname. - type: str - sshpubkey: - description: - - List of public SSH key. - - If an empty list is passed all assigned public keys will be deleted. - - If None is passed SSH public keys will not be checked or changed. - type: list - elements: str - state: - description: State to ensure. - default: "present" - choices: ["absent", "disabled", "enabled", "present"] - type: str - telephonenumber: - description: - - List of telephone numbers assigned to the user. - - If an empty list is passed all assigned telephone numbers will be deleted. - - If None is passed telephone numbers will not be checked or changed. - type: list - elements: str - title: - description: Title. - type: str - uid: - description: uid of the user. 
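The mutually_exclusive pairs in ipa_sudorule's main() above encode a single rule: a category of 'all' cannot be combined with the corresponding explicit list. A standalone validator with the same effect (hypothetical; AnsibleModule enforces this internally from the mutually_exclusive argument):

    CATEGORY_PAIRS = [
        ('cmdcategory', 'cmd'), ('cmdcategory', 'cmdgroup'),
        ('hostcategory', 'host'), ('hostcategory', 'hostgroup'),
        ('usercategory', 'user'), ('usercategory', 'usergroup'),
    ]

    def check_category_exclusivity(params):
        for category, explicit in CATEGORY_PAIRS:
            if params.get(category) is not None and params.get(explicit) is not None:
                raise ValueError('parameters are mutually exclusive: %s|%s' % (category, explicit))

    check_category_exclusivity({'cmdcategory': 'all', 'host': ['db01.example.com']})  # passes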
- required: true - aliases: ["name"] - type: str - uidnumber: - description: - - Account Settings UID/Posix User ID number. - type: str - gidnumber: - description: - - Posix Group ID. - type: str - homedirectory: - description: - - Default home directory of the user. - type: str - version_added: '0.2.0' - userauthtype: - description: - - The authentication type to use for the user. - choices: ["password", "radius", "otp", "pkinit", "hardened"] - type: list - elements: str - version_added: '1.2.0' -extends_documentation_fragment: -- community.general.ipa.documentation - -requirements: -- base64 -- hashlib -''' - -EXAMPLES = r''' -- name: Ensure pinky is present and always reset password - community.general.ipa_user: - name: pinky - state: present - krbpasswordexpiration: 20200119235959 - givenname: Pinky - sn: Acme - mail: - - pinky@acme.com - telephonenumber: - - '+555123456' - sshpubkey: - - ssh-rsa .... - - ssh-dsa .... - uidnumber: '1001' - gidnumber: '100' - homedirectory: /home/pinky - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure brain is absent - community.general.ipa_user: - name: brain - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure pinky is present but don't reset password if already exists - community.general.ipa_user: - name: pinky - state: present - givenname: Pinky - sn: Acme - password: zounds - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - update_password: on_create - -- name: Ensure pinky is present and using one time password and RADIUS authentication - community.general.ipa_user: - name: pinky - state: present - userauthtype: - - otp - - radius - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -user: - description: User as returned by IPA API - returned: always - type: dict -''' - -import base64 -import hashlib -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class UserIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(UserIPAClient, self).__init__(module, host, port, protocol) - - def user_find(self, name): - return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name}) - - def user_add(self, name, item): - return self._post_json(method='user_add', name=name, item=item) - - def user_mod(self, name, item): - return self._post_json(method='user_mod', name=name, item=item) - - def user_del(self, name): - return self._post_json(method='user_del', name=name) - - def user_disable(self, name): - return self._post_json(method='user_disable', name=name) - - def user_enable(self, name): - return self._post_json(method='user_enable', name=name) - - -def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None, - mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None, - title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None, - userauthtype=None): - user = {} - if displayname is not None: - user['displayname'] = displayname - if krbpasswordexpiration is not None: - user['krbpasswordexpiration'] = krbpasswordexpiration + "Z" - if givenname is not None: - user['givenname'] = givenname - if loginshell is not None: - user['loginshell'] = loginshell - if mail is not None: - user['mail'] = mail - 
user['nsaccountlock'] = nsaccountlock - if sn is not None: - user['sn'] = sn - if sshpubkey is not None: - user['ipasshpubkey'] = sshpubkey - if telephonenumber is not None: - user['telephonenumber'] = telephonenumber - if title is not None: - user['title'] = title - if userpassword is not None: - user['userpassword'] = userpassword - if gidnumber is not None: - user['gidnumber'] = gidnumber - if uidnumber is not None: - user['uidnumber'] = uidnumber - if homedirectory is not None: - user['homedirectory'] = homedirectory - if userauthtype is not None: - user['ipauserauthtype'] = userauthtype - - return user - - -def get_user_diff(client, ipa_user, module_user): - """ - Return the keys of each dict whereas values are different. Unfortunately the IPA - API returns everything as a list even if only a single value is possible. - Therefore some more complexity is needed. - The method will check if the value type of module_user.attr is not a list and - create a list with that element if the same attribute in ipa_user is list. In this way I hope that the method - must not be changed if the returned API dict is changed. - :param ipa_user: - :param module_user: - :return: - """ - # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints. - # These are used for comparison. - sshpubkey = None - if 'ipasshpubkey' in module_user: - hash_algo = 'md5' - if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:': - hash_algo = 'sha256' - module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']] - # Remove the ipasshpubkey element as it is not returned from IPA but save it's value to be used later on - sshpubkey = module_user['ipasshpubkey'] - del module_user['ipasshpubkey'] - - result = client.get_diff(ipa_data=ipa_user, module_data=module_user) - - # If there are public keys, remove the fingerprints and add them back to the dict - if sshpubkey is not None: - del module_user['sshpubkeyfp'] - module_user['ipasshpubkey'] = sshpubkey - return result - - -def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): - """ - Return the public key fingerprint of a given public SSH key - in format "[fp] [comment] (ssh-rsa)" where fp is of the format: - FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 - for md5 or - SHA256:[base64] - for sha256 - Comments are assumed to be all characters past the second - whitespace character in the sshpubkey string. 
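The two fingerprint formats this docstring describes can be illustrated standalone (the key bytes below are a toy value, not a real public key; get_ssh_key_fingerprint's actual body follows):

    import base64
    import hashlib

    key = base64.b64decode('AAAAB3NzaC1yc2EA')  # toy key body, illustration only
    md5_hex = hashlib.md5(key).hexdigest()
    md5_fp = ':'.join(md5_hex[i:i + 2] for i in range(0, len(md5_hex), 2)).upper()
    sha256_fp = 'SHA256:' + base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=')
    print(md5_fp)     # 16 colon-separated hex pairs, e.g. 'FB:0C:AC:...'
    print(sha256_fp)  # 'SHA256:' plus unpadded base64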
- :param ssh_key: - :param hash_algo: - :return: - """ - parts = ssh_key.strip().split(None, 2) - if len(parts) == 0: - return None - key_type = parts[0] - key = base64.b64decode(parts[1].encode('ascii')) - - if hash_algo == 'md5': - fp_plain = hashlib.md5(key).hexdigest() - key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() - elif hash_algo == 'sha256': - fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=') - key_fp = 'SHA256:{fp}'.format(fp=fp_plain) - if len(parts) < 3: - return "%s (%s)" % (key_fp, key_type) - else: - comment = parts[2] - return "%s %s (%s)" % (key_fp, comment, key_type) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['uid'] - nsaccountlock = state == 'disabled' - - module_user = get_user_dict(displayname=module.params.get('displayname'), - krbpasswordexpiration=module.params.get('krbpasswordexpiration'), - givenname=module.params.get('givenname'), - loginshell=module.params['loginshell'], - mail=module.params['mail'], sn=module.params['sn'], - sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock, - telephonenumber=module.params['telephonenumber'], title=module.params['title'], - userpassword=module.params['password'], - gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'), - homedirectory=module.params.get('homedirectory'), - userauthtype=module.params.get('userauthtype')) - - update_password = module.params.get('update_password') - ipa_user = client.user_find(name=name) - - changed = False - if state in ['present', 'enabled', 'disabled']: - if not ipa_user: - changed = True - if not module.check_mode: - ipa_user = client.user_add(name=name, item=module_user) - else: - if update_password == 'on_create': - module_user.pop('userpassword', None) - diff = get_user_diff(client, ipa_user, module_user) - if len(diff) > 0: - changed = True - if not module.check_mode: - ipa_user = client.user_mod(name=name, item=module_user) - else: - if ipa_user: - changed = True - if not module.check_mode: - client.user_del(name) - - return changed, ipa_user - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(displayname=dict(type='str'), - givenname=dict(type='str'), - update_password=dict(type='str', default="always", - choices=['always', 'on_create'], - no_log=False), - krbpasswordexpiration=dict(type='str', no_log=False), - loginshell=dict(type='str'), - mail=dict(type='list', elements='str'), - sn=dict(type='str'), - uid=dict(type='str', required=True, aliases=['name']), - gidnumber=dict(type='str'), - uidnumber=dict(type='str'), - password=dict(type='str', no_log=True), - sshpubkey=dict(type='list', elements='str'), - state=dict(type='str', default='present', - choices=['present', 'absent', 'enabled', 'disabled']), - telephonenumber=dict(type='list', elements='str'), - title=dict(type='str'), - homedirectory=dict(type='str'), - userauthtype=dict(type='list', elements='str', - choices=['password', 'radius', 'otp', 'pkinit', 'hardened'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = UserIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - # If sshpubkey is defined as None than module.params['sshpubkey'] is [None]. IPA itself returns None (not a list). - # Therefore a small check here to replace list(None) by None. 
Otherwise get_user_diff() would return sshpubkey - # as different which should be avoided. - if module.params['sshpubkey'] is not None: - if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "": - module.params['sshpubkey'] = None - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, user = ensure(module, client) - module.exit_json(changed=changed, user=user) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py b/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py deleted file mode 100644 index 7a6a601f..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Juan Manuel Parrilla -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_vault -author: Juan Manuel Parrilla (@jparrill) -short_description: Manage FreeIPA vaults -description: -- Add, modify and delete vaults and secret vaults. -- KRA service should be enabled to use this module. -options: - cn: - description: - - Vault name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str - description: - description: - - Description. - type: str - ipavaulttype: - description: - - Vault types are based on security level. - default: "symmetric" - choices: ["asymmetric", "standard", "symmetric"] - aliases: ["vault_type"] - type: str - ipavaultpublickey: - description: - - Public key. - aliases: ["vault_public_key"] - type: str - ipavaultsalt: - description: - - Vault Salt. - aliases: ["vault_salt"] - type: str - username: - description: - - Any user can own one or more user vaults. - - Mutually exclusive with service. - aliases: ["user"] - type: list - elements: str - service: - description: - - Any service can own one or more service vaults. - - Mutually exclusive with user. - type: str - state: - description: - - State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - replace: - description: - - Force replace the existant vault on IPA server. - type: bool - default: False - choices: ["True", "False"] - validate_certs: - description: - - Validate IPA server certificates. 
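The sshpubkey check in ipa_user's main() above maps a single empty-string entry to None so that get_user_diff() does not report sshpubkey as changed against IPA's plain None. Factored as a small helper (hypothetical name; the module inlines the check):

    def normalize_sshpubkey(value):
        # [''] arrives when sshpubkey is defined but empty; IPA returns plain None.
        if value is not None and len(value) == 1 and value[0] == '':
            return None
        return value

    assert normalize_sshpubkey(['']) is None
    assert normalize_sshpubkey(['ssh-rsa AAAA toykey']) == ['ssh-rsa AAAA toykey']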
- type: bool - default: true -extends_documentation_fragment: -- community.general.ipa.documentation - -''' - -EXAMPLES = r''' -- name: Ensure vault is present - community.general.ipa_vault: - name: vault01 - vault_type: standard - user: user01 - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - validate_certs: false - -- name: Ensure vault is present for Admin user - community.general.ipa_vault: - name: vault01 - vault_type: standard - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure vault is absent - community.general.ipa_vault: - name: vault01 - vault_type: standard - user: user01 - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Modify vault if already exists - community.general.ipa_vault: - name: vault01 - vault_type: standard - description: "Vault for test" - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - replace: True - -- name: Get vault info if already exists - community.general.ipa_vault: - name: vault01 - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -vault: - description: Vault as returned by IPA API - returned: always - type: dict -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class VaultIPAClient(IPAClient): - def __init__(self, module, host, port, protocol): - super(VaultIPAClient, self).__init__(module, host, port, protocol) - - def vault_find(self, name): - return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name}) - - def vault_add_internal(self, name, item): - return self._post_json(method='vault_add_internal', name=name, item=item) - - def vault_mod_internal(self, name, item): - return self._post_json(method='vault_mod_internal', name=name, item=item) - - def vault_del(self, name): - return self._post_json(method='vault_del', name=name) - - -def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None): - vault = {} - - if description is not None: - vault['description'] = description - if vault_type is not None: - vault['ipavaulttype'] = vault_type - if vault_salt is not None: - vault['ipavaultsalt'] = vault_salt - if vault_public_key is not None: - vault['ipavaultpublickey'] = vault_public_key - if service is not None: - vault['service'] = service - return vault - - -def get_vault_diff(client, ipa_vault, module_vault, module): - return client.get_diff(ipa_data=ipa_vault, module_data=module_vault) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - user = module.params['username'] - replace = module.params['replace'] - - module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'], - vault_salt=module.params['ipavaultsalt'], - vault_public_key=module.params['ipavaultpublickey'], - service=module.params['service']) - ipa_vault = client.vault_find(name=name) - - changed = False - if state == 'present': - if not ipa_vault: - # New vault - changed = True - if not module.check_mode: - ipa_vault = client.vault_add_internal(name, item=module_vault) - else: - # Already exists - if replace: - diff = get_vault_diff(client, ipa_vault, module_vault, module) - if len(diff) > 0: - changed = True - if not module.check_mode: - data = {} - for key in 
diff: - data[key] = module_vault.get(key) - client.vault_mod_internal(name=name, item=data) - - else: - if ipa_vault: - changed = True - if not module.check_mode: - client.vault_del(name) - - return changed, client.vault_find(name=name) - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - ipavaulttype=dict(type='str', default='symmetric', - choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']), - ipavaultsalt=dict(type='str', aliases=['vault_salt']), - ipavaultpublickey=dict(type='str', aliases=['vault_public_key']), - service=dict(type='str'), - replace=dict(type='bool', default=False, choices=[True, False]), - state=dict(type='str', default='present', choices=['present', 'absent']), - username=dict(type='list', elements='str', aliases=['user'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['username', 'service']]) - - client = VaultIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, vault = ensure(module, client) - module.exit_json(changed=changed, vault=vault) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_authentication.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_authentication.py deleted file mode 100644 index c7bf5bc0..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_authentication.py +++ /dev/null @@ -1,502 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, INSPQ -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_authentication - -short_description: Configure authentication in Keycloak - -description: - - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it. - - It can also delete the flow. - -version_added: "3.3.0" - -options: - realm: - description: - - The name of the realm in which is the authentication. - required: true - type: str - alias: - description: - - Alias for the authentication flow. - required: true - type: str - description: - description: - - Description of the flow. - type: str - providerId: - description: - - C(providerId) for the new flow when not copied from an existing flow. - type: str - copyFrom: - description: - - C(flowAlias) of the authentication flow to use for the copy. - type: str - authenticationExecutions: - description: - - Configuration structure for the executions. - type: list - elements: dict - suboptions: - providerId: - description: - - C(providerID) for the new flow when not copied from an existing flow. - type: str - displayName: - description: - - Name of the execution or subflow to create or update. - type: str - requirement: - description: - - Control status of the subflow or execution. - choices: [ "REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL" ] - type: str - flowAlias: - description: - - Alias of parent flow. 
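ipa_vault's ensure() above only computes a diff and calls vault_mod_internal when replace is true; without it an existing vault is returned unchanged. Condensed sketch (ensure_vault is a hypothetical wrapper; vault_find, vault_add_internal, vault_mod_internal and get_diff are the client methods shown above):

    def ensure_vault(client, name, module_vault, replace=False, check_mode=False):
        ipa_vault = client.vault_find(name=name)
        if not ipa_vault:
            if not check_mode:
                client.vault_add_internal(name, item=module_vault)
            return True  # new vault
        if replace:
            diff = client.get_diff(ipa_data=ipa_vault, module_data=module_vault)
            if diff and not check_mode:
                client.vault_mod_internal(name=name, item={k: module_vault.get(k) for k in diff})
            return bool(diff)
        return False  # vault exists and replace not requested: report only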
- type: str - authenticationConfig: - description: - - Describe the config of the authentication. - type: dict - index: - description: - - Priority order of the execution. - type: int - state: - description: - - Control if the authentication flow must exists or not. - choices: [ "present", "absent" ] - default: present - type: str - force: - type: bool - default: false - description: - - If C(true), allows to remove the authentication flow and recreate it. - -extends_documentation_fragment: -- community.general.keycloak - -author: - - Philippe Gauthier (@elfelip) - - Gaëtan Daubresse (@Gaetan2907) -''' - -EXAMPLES = ''' - - name: Create an authentication flow from first broker login and add an execution to it. - community.general.keycloak_authentication: - auth_keycloak_url: http://localhost:8080/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: master - alias: "Copy of first broker login" - copyFrom: "first broker login" - authenticationExecutions: - - providerId: "test-execution1" - requirement: "REQUIRED" - authenticationConfig: - alias: "test.execution1.property" - config: - test1.property: "value" - - providerId: "test-execution2" - requirement: "REQUIRED" - authenticationConfig: - alias: "test.execution2.property" - config: - test2.property: "value" - state: present - - - name: Re-create the authentication flow - community.general.keycloak_authentication: - auth_keycloak_url: http://localhost:8080/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: master - alias: "Copy of first broker login" - copyFrom: "first broker login" - authenticationExecutions: - - providerId: "test-provisioning" - requirement: "REQUIRED" - authenticationConfig: - alias: "test.provisioning.property" - config: - test.provisioning.property: "value" - state: present - force: true - - - name: Create an authentication flow with subflow containing an execution. - community.general.keycloak_authentication: - auth_keycloak_url: http://localhost:8080/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: master - alias: "Copy of first broker login" - copyFrom: "first broker login" - authenticationExecutions: - - providerId: "test-execution1" - requirement: "REQUIRED" - - displayName: "New Subflow" - requirement: "REQUIRED" - - providerId: "auth-cookie" - requirement: "REQUIRED" - flowAlias: "New Sublow" - state: present - - - name: Remove authentication. - community.general.keycloak_authentication: - auth_keycloak_url: http://localhost:8080/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: master - alias: "Copy of first broker login" - state: absent -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - -flow: - description: - - JSON representation for the authentication. - - Deprecated return value, it will be removed in community.general 6.0.0. Please use the return value I(end_state) instead. 
- returned: on success - type: dict - sample: { - "alias": "Copy of first broker login", - "authenticationExecutions": [ - { - "alias": "review profile config", - "authenticationConfig": { - "alias": "review profile config", - "config": { "update.profile.on.first.login": "missing" }, - "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7" - }, - "configurable": true, - "displayName": "Review Profile", - "id": "8f77dab8-2008-416f-989e-88b09ccf0b4c", - "index": 0, - "level": 0, - "providerId": "idp-review-profile", - "requirement": "REQUIRED", - "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ] - } - ], - "builtIn": false, - "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", - "id": "bc228863-5887-4297-b898-4d988f8eaa5c", - "providerId": "basic-flow", - "topLevel": true - } - -end_state: - description: Representation of the authentication after module execution. - returned: on success - type: dict - sample: { - "alias": "Copy of first broker login", - "authenticationExecutions": [ - { - "alias": "review profile config", - "authenticationConfig": { - "alias": "review profile config", - "config": { "update.profile.on.first.login": "missing" }, - "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7" - }, - "configurable": true, - "displayName": "Review Profile", - "id": "8f77dab8-2008-416f-989e-88b09ccf0b4c", - "index": 0, - "level": 0, - "providerId": "idp-review-profile", - "requirement": "REQUIRED", - "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ] - } - ], - "builtIn": false, - "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", - "id": "bc228863-5887-4297-b898-4d988f8eaa5c", - "providerId": "basic-flow", - "topLevel": true - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \ - import KeycloakAPI, camel, keycloak_argument_spec, get_token, KeycloakError, is_struct_included -from ansible.module_utils.basic import AnsibleModule - - -def find_exec_in_executions(searched_exec, executions): - """ - Search if exec is contained in the executions. - :param searched_exec: Execution to search for. - :param executions: List of executions. - :return: Index of the execution, -1 if not found.. - """ - for i, existing_exec in enumerate(executions, start=0): - if ("providerId" in existing_exec and "providerId" in searched_exec and - existing_exec["providerId"] == searched_exec["providerId"] or - "displayName" in existing_exec and "displayName" in searched_exec and - existing_exec["displayName"] == searched_exec["displayName"]): - return i - return -1 - - -def create_or_update_executions(kc, config, realm='master'): - """ - Create or update executions for an authentication flow. - :param kc: Keycloak API access. - :param config: Representation of the authentication flow including it's executions. 
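A usage sketch for find_exec_in_executions as defined above; since 'and' binds tighter than 'or' in its condition, it matches on an equal providerId or, failing that, an equal displayName (the sample data is hypothetical):

    existing = [
        {'providerId': 'idp-review-profile', 'displayName': 'Review Profile'},
        {'displayName': 'New Subflow'},
    ]
    assert find_exec_in_executions({'providerId': 'idp-review-profile'}, existing) == 0
    assert find_exec_in_executions({'displayName': 'New Subflow'}, existing) == 1
    assert find_exec_in_executions({'providerId': 'auth-cookie'}, existing) == -1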
- :param realm: Realm.
- :return: tuple (changed, dict(before, after)), where
- changed (bool) indicates whether changes have been made, and
- dict(str, str) shows the state before and after the creation/update.
- """
- try:
- changed = False
- after = ""
- before = ""
- if "authenticationExecutions" in config:
- # Get existing executions on the Keycloak server for this alias
- existing_executions = kc.get_executions_representation(config, realm=realm)
- for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0):
- if new_exec["index"] is not None:
- new_exec_index = new_exec["index"]
- exec_found = False
- # Get the parent flow alias if given
- if new_exec["flowAlias"] is not None:
- flow_alias_parent = new_exec["flowAlias"]
- else:
- flow_alias_parent = config["alias"]
- # Check whether an existing execution matches the new one by providerId or displayName
- exec_index = find_exec_in_executions(new_exec, existing_executions)
- if exec_index != -1:
- # Exclude keys that must not be compared with existing_exec
- exclude_key = ["flowAlias"]
- for index_key, key in enumerate(new_exec, start=0):
- if new_exec[key] is None:
- exclude_key.append(key)
- # Compare the executions to see if changes are needed
- if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index:
- exec_found = True
- before += str(existing_executions[exec_index]) + '\n'
- id_to_update = existing_executions[exec_index]["id"]
- # Clear the matched execution so a second one with the same name cannot match again
- existing_executions[exec_index].clear()
- elif new_exec["providerId"] is not None:
- kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm)
- exec_found = True
- exec_index = new_exec_index
- id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
- after += str(new_exec) + '\n'
- elif new_exec["displayName"] is not None:
- kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm)
- exec_found = True
- exec_index = new_exec_index
- id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
- after += str(new_exec) + '\n'
- if exec_found:
- changed = True
- if exec_index != -1:
- # Update the existing execution
- updated_exec = {
- "id": id_to_update
- }
- # Add the execution configuration
- if new_exec["authenticationConfig"] is not None:
- kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm)
- for key in new_exec:
- # Remove unwanted keys for the next API call
- if key != "flowAlias" and key != "authenticationConfig":
- updated_exec[key] = new_exec[key]
- if new_exec["requirement"] is not None:
- kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm)
- diff = exec_index - new_exec_index
- kc.change_execution_priority(updated_exec["id"], diff, realm=realm)
- after += str(kc.get_executions_representation(config, realm=realm)[new_exec_index]) + '\n'
- return changed, dict(before=before, after=after)
- except Exception as e:
- kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
-
-
-def main():
- """
- Module execution
-
- :return:
- """
- argument_spec = keycloak_argument_spec()
-
- meta_args = dict(
- realm=dict(type='str', required=True),
- alias=dict(type='str', required=True),
- providerId=dict(type='str'),
- description=dict(type='str'),
- copyFrom=dict(type='str'),
- authenticationExecutions=dict(type='list', elements='dict',
- 
options=dict( - providerId=dict(type='str'), - displayName=dict(type='str'), - requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type='str'), - flowAlias=dict(type='str'), - authenticationConfig=dict(type='dict'), - index=dict(type='int'), - )), - state=dict(choices=["absent", "present"], default='present'), - force=dict(type='bool', default=False), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']]) - ) - - result = dict(changed=False, msg='', flow={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - force = module.params.get('force') - - new_auth_repr = { - "alias": module.params.get("alias"), - "copyFrom": module.params.get("copyFrom"), - "providerId": module.params.get("providerId"), - "authenticationExecutions": module.params.get("authenticationExecutions"), - "description": module.params.get("description"), - "builtIn": module.params.get("builtIn"), - "subflow": module.params.get("subflow"), - } - - auth_repr = kc.get_authentication_flow_by_alias(alias=new_auth_repr["alias"], realm=realm) - - # Cater for when it doesn't exist (an empty dict) - if not auth_repr: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['flow'] = result['end_state'] - result['msg'] = new_auth_repr["alias"] + ' absent' - module.exit_json(**result) - - elif state == 'present': - # Process a creation - result['changed'] = True - - if module._diff: - result['diff'] = dict(before='', after=new_auth_repr) - - if module.check_mode: - module.exit_json(**result) - - # If copyFrom is defined, create authentication flow from a copy - if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: - auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) - else: # Create an empty authentication flow - auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) - - # If the authentication still not exist on the server, raise an exception. 
- if auth_repr is None: - result['msg'] = "Authentication just created not found: " + str(new_auth_repr) - module.fail_json(**result) - - # Configure the executions for the flow - create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) - - # Get executions created - exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) - if exec_repr is not None: - auth_repr["authenticationExecutions"] = exec_repr - result['end_state'] = auth_repr - result['flow'] = result['end_state'] - - else: - if state == 'present': - # Process an update - - if force: # If force option is true - # Delete the actual authentication flow - result['changed'] = True - if module._diff: - result['diff'] = dict(before=auth_repr, after=new_auth_repr) - if module.check_mode: - module.exit_json(**result) - kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) - # If copyFrom is defined, create authentication flow from a copy - if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: - auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) - else: # Create an empty authentication flow - auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) - # If the authentication still not exist on the server, raise an exception. - if auth_repr is None: - result['msg'] = "Authentication just created not found: " + str(new_auth_repr) - module.fail_json(**result) - # Configure the executions for the flow - - if module.check_mode: - module.exit_json(**result) - changed, diff = create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) - result['changed'] |= changed - - if module._diff: - result['diff'] = diff - - # Get executions created - exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) - if exec_repr is not None: - auth_repr["authenticationExecutions"] = exec_repr - result['end_state'] = auth_repr - result['flow'] = result['end_state'] - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=auth_repr, after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) - - result['msg'] = 'Authentication flow: {alias} id: {id} is deleted'.format(alias=new_auth_repr['alias'], - id=auth_repr["id"]) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py deleted file mode 100644 index 82cdab8b..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py +++ /dev/null @@ -1,944 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_client - -short_description: Allows administration of Keycloak clients via Keycloak API - - -description: - - This module allows the administration of Keycloak clients via the Keycloak REST API. It - requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. 
In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- Aliases are provided so camelCased versions can be used as well.
-
- - The Keycloak API does not always sanity-check inputs; for example, you can set
- SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
- If you do not specify a setting, usually a sensible default is chosen.
-
-options:
- state:
- description:
- - State of the client.
- - On C(present), the client will be created (or updated if it exists already).
- - On C(absent), the client will be removed if it exists.
- choices: ['present', 'absent']
- default: 'present'
- type: str
-
- realm:
- description:
- - The realm to create the client in.
- type: str
- default: master
-
- client_id:
- description:
- - Client id of client to be worked on. This is usually an alphanumeric name chosen by
- you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
- This is 'clientId' in the Keycloak REST API.
- aliases:
- - clientId
- type: str
-
- id:
- description:
- - Id of client to be worked on. This is usually a UUID. Either this or I(client_id)
- is required. If you specify both, this takes precedence.
- type: str
-
- name:
- description:
- - Name of the client (this is not the same as I(client_id)).
- type: str
-
- description:
- description:
- - Description of the client in Keycloak.
- type: str
-
- root_url:
- description:
- - Root URL appended to relative URLs for this client.
- This is 'rootUrl' in the Keycloak REST API.
- aliases:
- - rootUrl
- type: str
-
- admin_url:
- description:
- - URL to the admin interface of the client.
- This is 'adminUrl' in the Keycloak REST API.
- aliases:
- - adminUrl
- type: str
-
- base_url:
- description:
- - Default URL to use when the auth server needs to redirect or link back to the client.
- This is 'baseUrl' in the Keycloak REST API.
- aliases:
- - baseUrl
- type: str
-
- enabled:
- description:
- - Is this client enabled or not?
- type: bool
-
- client_authenticator_type:
- description:
- - How do clients authenticate with the auth server? Either C(client-secret) or
- C(client-jwt) can be chosen. When using C(client-secret), the module parameter
- I(secret) can be used to set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
- C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
- to configure its behavior.
- This is 'clientAuthenticatorType' in the Keycloak REST API.
- choices: ['client-secret', 'client-jwt']
- aliases:
- - clientAuthenticatorType
- type: str
-
- secret:
- description:
- - When using I(client_authenticator_type) C(client-secret) (the default), you can
- specify a secret here (otherwise one will be generated if it does not exist). If
- you change this secret, the module currently does not register a change (but the
- changed secret will be saved).
- type: str
-
- registration_access_token:
- description:
- - The registration access token provides access for clients to the client registration
- service.
- This is 'registrationAccessToken' in the Keycloak REST API.
- aliases:
- - registrationAccessToken
- type: str
-
- default_roles:
- description:
- - List of default roles for this client.
If the client roles referenced do not exist - yet, they will be created. - This is 'defaultRoles' in the Keycloak REST API. - aliases: - - defaultRoles - type: list - elements: str - - redirect_uris: - description: - - Acceptable redirect URIs for this client. - This is 'redirectUris' in the Keycloak REST API. - aliases: - - redirectUris - type: list - elements: str - - web_origins: - description: - - List of allowed CORS origins. - This is 'webOrigins' in the Keycloak REST API. - aliases: - - webOrigins - type: list - elements: str - - not_before: - description: - - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). - This is 'notBefore' in the Keycloak REST API. - type: int - aliases: - - notBefore - - bearer_only: - description: - - The access type of this client is bearer-only. - This is 'bearerOnly' in the Keycloak REST API. - aliases: - - bearerOnly - type: bool - - consent_required: - description: - - If enabled, users have to consent to client access. - This is 'consentRequired' in the Keycloak REST API. - aliases: - - consentRequired - type: bool - - standard_flow_enabled: - description: - - Enable standard flow for this client or not (OpenID connect). - This is 'standardFlowEnabled' in the Keycloak REST API. - aliases: - - standardFlowEnabled - type: bool - - implicit_flow_enabled: - description: - - Enable implicit flow for this client or not (OpenID connect). - This is 'implicitFlowEnabled' in the Keycloak REST API. - aliases: - - implicitFlowEnabled - type: bool - - direct_access_grants_enabled: - description: - - Are direct access grants enabled for this client or not (OpenID connect). - This is 'directAccessGrantsEnabled' in the Keycloak REST API. - aliases: - - directAccessGrantsEnabled - type: bool - - service_accounts_enabled: - description: - - Are service accounts enabled for this client or not (OpenID connect). - This is 'serviceAccountsEnabled' in the Keycloak REST API. - aliases: - - serviceAccountsEnabled - type: bool - - authorization_services_enabled: - description: - - Are authorization services enabled for this client or not (OpenID connect). - This is 'authorizationServicesEnabled' in the Keycloak REST API. - aliases: - - authorizationServicesEnabled - type: bool - - public_client: - description: - - Is the access type for this client public or not. - This is 'publicClient' in the Keycloak REST API. - aliases: - - publicClient - type: bool - - frontchannel_logout: - description: - - Is frontchannel logout enabled for this client or not. - This is 'frontchannelLogout' in the Keycloak REST API. - aliases: - - frontchannelLogout - type: bool - - protocol: - description: - - Type of client (either C(openid-connect) or C(saml). - type: str - choices: ['openid-connect', 'saml'] - - full_scope_allowed: - description: - - Is the "Full Scope Allowed" feature set for this client or not. - This is 'fullScopeAllowed' in the Keycloak REST API. - aliases: - - fullScopeAllowed - type: bool - - node_re_registration_timeout: - description: - - Cluster node re-registration timeout for this client. - This is 'nodeReRegistrationTimeout' in the Keycloak REST API. - type: int - aliases: - - nodeReRegistrationTimeout - - registered_nodes: - description: - - dict of registered cluster nodes (with C(nodename) as the key and last registration - time as the value). - This is 'registeredNodes' in the Keycloak REST API. - type: dict - aliases: - - registeredNodes - - client_template: - description: - - Client template to use for this client. 
If it does not exist this field will silently - be dropped. - This is 'clientTemplate' in the Keycloak REST API. - type: str - aliases: - - clientTemplate - - use_template_config: - description: - - Whether or not to use configuration from the I(client_template). - This is 'useTemplateConfig' in the Keycloak REST API. - aliases: - - useTemplateConfig - type: bool - - use_template_scope: - description: - - Whether or not to use scope configuration from the I(client_template). - This is 'useTemplateScope' in the Keycloak REST API. - aliases: - - useTemplateScope - type: bool - - use_template_mappers: - description: - - Whether or not to use mapper configuration from the I(client_template). - This is 'useTemplateMappers' in the Keycloak REST API. - aliases: - - useTemplateMappers - type: bool - - surrogate_auth_required: - description: - - Whether or not surrogate auth is required. - This is 'surrogateAuthRequired' in the Keycloak REST API. - aliases: - - surrogateAuthRequired - type: bool - - authorization_settings: - description: - - a data structure defining the authorization settings for this client. For reference, - please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). - This is 'authorizationSettings' in the Keycloak REST API. - type: dict - aliases: - - authorizationSettings - - authentication_flow_binding_overrides: - description: - - Override realm authentication flow bindings. - type: dict - aliases: - - authenticationFlowBindingOverrides - version_added: 3.4.0 - - protocol_mappers: - description: - - a list of dicts defining protocol mappers for this client. - This is 'protocolMappers' in the Keycloak REST API. - aliases: - - protocolMappers - type: list - elements: dict - suboptions: - consentRequired: - description: - - Specifies whether a user needs to provide consent to a client for this mapper to be active. - type: bool - - consentText: - description: - - The human-readable name of the consent the user is presented to accept. - type: str - - id: - description: - - Usually a UUID specifying the internal ID of this protocol mapper instance. - type: str - - name: - description: - - The name of this protocol mapper. - type: str - - protocol: - description: - - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper. - is active. - choices: ['openid-connect', 'saml'] - type: str - - protocolMapper: - description: - - The Keycloak-internal name of the type of this protocol-mapper. 
While an exhaustive list is - impossible to provide since this may be extended through SPIs by the user of Keycloak, - by default Keycloak as of 3.4 ships with at least - - C(docker-v2-allow-all-mapper) - - C(oidc-address-mapper) - - C(oidc-full-name-mapper) - - C(oidc-group-membership-mapper) - - C(oidc-hardcoded-claim-mapper) - - C(oidc-hardcoded-role-mapper) - - C(oidc-role-name-mapper) - - C(oidc-script-based-protocol-mapper) - - C(oidc-sha256-pairwise-sub-mapper) - - C(oidc-usermodel-attribute-mapper) - - C(oidc-usermodel-client-role-mapper) - - C(oidc-usermodel-property-mapper) - - C(oidc-usermodel-realm-role-mapper) - - C(oidc-usersessionmodel-note-mapper) - - C(saml-group-membership-mapper) - - C(saml-hardcode-attribute-mapper) - - C(saml-hardcode-role-mapper) - - C(saml-role-list-mapper) - - C(saml-role-name-mapper) - - C(saml-user-attribute-mapper) - - C(saml-user-property-mapper) - - C(saml-user-session-note-mapper) - - An exhaustive list of available mappers on your installation can be obtained on - the admin console by going to Server Info -> Providers and looking under - 'protocol-mapper'. - type: str - - config: - description: - - Dict specifying the configuration options for the protocol mapper; the - contents differ depending on the value of I(protocolMapper) and are not documented - other than by the source of the mappers and its parent class(es). An example is given - below. It is easiest to obtain valid config values by dumping an already-existing - protocol mapper configuration through check-mode in the I(existing) field. - type: dict - - attributes: - description: - - A dict of further attributes for this client. This can contain various configuration - settings; an example is given in the examples section. While an exhaustive list of - permissible options is not available; possible options as of Keycloak 3.4 are listed below. The Keycloak - API does not validate whether a given option is appropriate for the protocol used; if specified - anyway, Keycloak will simply not use it. - type: dict - suboptions: - saml.authnstatement: - description: - - For SAML clients, boolean specifying whether or not a statement containing method and timestamp - should be included in the login response. - - saml.client.signature: - description: - - For SAML clients, boolean specifying whether a client signature is required and validated. - - saml.encrypt: - description: - - Boolean specifying whether SAML assertions should be encrypted with the client's public key. - - saml.force.post.binding: - description: - - For SAML clients, boolean specifying whether always to use POST binding for responses. - - saml.onetimeuse.condition: - description: - - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses. - - saml.server.signature: - description: - - Boolean specifying whether SAML documents should be signed by the realm. - - saml.server.signature.keyinfo.ext: - description: - - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion - of the signing key id in the SAML Extensions element. - - saml.signature.algorithm: - description: - - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1). - - saml.signing.certificate: - description: - - SAML signing key certificate, base64-encoded. - - saml.signing.private.key: - description: - - SAML signing key private key, base64-encoded. 
- - saml_assertion_consumer_url_post: - description: - - SAML POST Binding URL for the client's assertion consumer service (login responses). - - saml_assertion_consumer_url_redirect: - description: - - SAML Redirect Binding URL for the client's assertion consumer service (login responses). - - - saml_force_name_id_format: - description: - - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured one instead. - - saml_name_id_format: - description: - - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)) - - saml_signature_canonicalization_method: - description: - - SAML signature canonicalization method. This is one of four values, namely - C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE, - C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS, - C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and - C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS. - - saml_single_logout_service_url_post: - description: - - SAML POST binding url for the client's single logout service. - - saml_single_logout_service_url_redirect: - description: - - SAML redirect binding url for the client's single logout service. - - user.info.response.signature.alg: - description: - - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned). - - request.object.signature.alg: - description: - - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending - OIDC request object. One of C(any), C(none), C(RS256). - - use.jwks.url: - description: - - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client - public keys. - - jwks.url: - description: - - For OpenID-Connect clients, URL where client keys in JWK are stored. - - jwt.credential.certificate: - description: - - For OpenID-Connect clients, client certificate for validating JWT issued by - client and signed by its key, base64-encoded. 
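(Aside for readers of this diff: since I(attributes) is a free-form dict, a short sketch of what a caller might pass for a signed SAML client can help. Keys come from the suboptions documented above; the values are hypothetical, and as the option description notes, Keycloak simply ignores keys that do not apply to the client's protocol.)

    # Hypothetical 'attributes' value for a signed SAML client; all keys
    # are documented in the suboptions above, values are placeholders.
    saml_attributes = {
        "saml.authnstatement": "true",
        "saml.server.signature": "true",
        "saml.signature.algorithm": "RSA_SHA256",
        "saml_name_id_format": "username",
        "saml_signature_canonicalization_method": "http://www.w3.org/2001/10/xml-exc-c14n#",
    }
    print(saml_attributes["saml.signature.algorithm"])  # RSA_SHA256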
- -extends_documentation_fragment: -- community.general.keycloak - -author: - - Eike Frost (@eikef) -''' - -EXAMPLES = ''' -- name: Create or update Keycloak client (minimal example), authentication with credentials - community.general.keycloak_client: - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - client_id: test - state: present - delegate_to: localhost - - -- name: Create or update Keycloak client (minimal example), authentication with token - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - token: TOKEN - client_id: test - state: present - delegate_to: localhost - - -- name: Delete a Keycloak client - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - client_id: test - state: absent - delegate_to: localhost - - -- name: Create or update a Keycloak client (with all the bells and whistles) - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - state: present - realm: master - client_id: test - id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95 - name: this_is_a_test - description: Description of this wonderful client - root_url: https://www.example.com/ - admin_url: https://www.example.com/admin_url - base_url: basepath - enabled: True - client_authenticator_type: client-secret - secret: REALLYWELLKEPTSECRET - redirect_uris: - - https://www.example.com/* - - http://localhost:8888/ - web_origins: - - https://www.example.com/* - not_before: 1507825725 - bearer_only: False - consent_required: False - standard_flow_enabled: True - implicit_flow_enabled: False - direct_access_grants_enabled: False - service_accounts_enabled: False - authorization_services_enabled: False - public_client: False - frontchannel_logout: False - protocol: openid-connect - full_scope_allowed: false - node_re_registration_timeout: -1 - client_template: test - use_template_config: False - use_template_scope: false - use_template_mappers: no - registered_nodes: - node01.example.com: 1507828202 - registration_access_token: eyJWT_TOKEN - surrogate_auth_required: false - default_roles: - - test01 - - test02 - authentication_flow_binding_overrides: - browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb - protocol_mappers: - - config: - access.token.claim: True - claim.name: "family_name" - id.token.claim: True - jsonType.label: String - user.attribute: lastName - userinfo.token.claim: True - consentRequired: True - consentText: "${familyName}" - name: family name - protocol: openid-connect - protocolMapper: oidc-usermodel-property-mapper - - config: - attribute.name: Role - attribute.nameformat: Basic - single: false - consentRequired: false - name: role list - protocol: saml - protocolMapper: saml-role-list-mapper - attributes: - saml.authnstatement: True - saml.client.signature: True - saml.force.post.binding: True - saml.server.signature: True - saml.signature.algorithm: RSA_SHA256 - saml.signing.certificate: CERTIFICATEHERE - saml.signing.private.key: PRIVATEKEYHERE - saml_force_name_id_format: False - saml_name_id_format: username - saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#" - user.info.response.signature.alg: RS256 - request.object.signature.alg: RS256 - 
use.jwks.url: true
- jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
- jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
- delegate_to: localhost
-'''
-
-RETURN = '''
-msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Client testclient has been updated"
-
-proposed:
- description: Representation of proposed client.
- returned: always
- type: dict
- sample: {
- clientId: "test"
- }
-
-existing:
- description: Representation of existing client (sample is truncated).
- returned: always
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
- }
-
-end_state:
- description: Representation of client after module execution (sample is truncated).
- returned: on success
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
- }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
- keycloak_argument_spec, get_token, KeycloakError
-from ansible.module_utils.basic import AnsibleModule
-
-
-def normalise_cr(clientrep, remove_ids=False):
- """ Re-sorts any list properties whose order does not matter so that diffs are minimised, and adds default values
- where appropriate so that change detection is more effective.
-
- :param clientrep: the clientrep dict to be normalised
- :param remove_ids: If set to true, the unique IDs of objects are removed so that diffs and change checks do not
- trigger on IDs that are usually not known in advance (e.g. for protocol_mappers)
- :return: normalised clientrep dict
- """
- # Avoid modifying the dict that was passed in
- clientrep = clientrep.copy()
-
- if 'attributes' in clientrep:
- clientrep['attributes'] = list(sorted(clientrep['attributes']))
-
- if 'redirectUris' in clientrep:
- clientrep['redirectUris'] = list(sorted(clientrep['redirectUris']))
-
- if 'protocolMappers' in clientrep:
- clientrep['protocolMappers'] = sorted(clientrep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper')))
- for mapper in clientrep['protocolMappers']:
- if remove_ids:
- mapper.pop('id', None)
-
- # Set to a default value.
- mapper['consentRequired'] = mapper.get('consentRequired', False)
-
- return clientrep
-
-
-def sanitize_cr(clientrep):
- """ Removes potentially sensitive details from a client representation.
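(Aside for readers of this diff: the sorting in normalise_cr() above exists because list order coming back from the Keycloak API is not guaranteed, so an order-only difference must not count as a change. A self-contained illustration with hypothetical data:)

    before = {"redirectUris": ["https://a.example/*", "https://b.example/*"]}
    desired = {"redirectUris": ["https://b.example/*", "https://a.example/*"]}
    print(before == desired)  # False: only the order differs

    def normalise(rep):
        # Mirror of the redirectUris handling above: sort before comparing.
        rep = dict(rep)
        if "redirectUris" in rep:
            rep["redirectUris"] = sorted(rep["redirectUris"])
        return rep

    print(normalise(before) == normalise(desired))  # True: no real change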
- - :param clientrep: the clientrep dict to be sanitized - :return: sanitized clientrep dict - """ - result = clientrep.copy() - if 'secret' in result: - result['secret'] = 'no_log' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes']['saml.signing.private.key'] = 'no_log' - return normalise_cr(result) - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - protmapper_spec = dict( - consentRequired=dict(type='bool'), - consentText=dict(type='str'), - id=dict(type='str'), - name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), - protocolMapper=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - - id=dict(type='str'), - client_id=dict(type='str', aliases=['clientId']), - name=dict(type='str'), - description=dict(type='str'), - root_url=dict(type='str', aliases=['rootUrl']), - admin_url=dict(type='str', aliases=['adminUrl']), - base_url=dict(type='str', aliases=['baseUrl']), - surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']), - enabled=dict(type='bool'), - client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']), - secret=dict(type='str', no_log=True), - registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), - default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), - redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']), - web_origins=dict(type='list', elements='str', aliases=['webOrigins']), - not_before=dict(type='int', aliases=['notBefore']), - bearer_only=dict(type='bool', aliases=['bearerOnly']), - consent_required=dict(type='bool', aliases=['consentRequired']), - standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']), - implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']), - direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']), - service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']), - authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']), - public_client=dict(type='bool', aliases=['publicClient']), - frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']), - protocol=dict(type='str', choices=['openid-connect', 'saml']), - attributes=dict(type='dict'), - full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']), - node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']), - registered_nodes=dict(type='dict', aliases=['registeredNodes']), - client_template=dict(type='str', aliases=['clientTemplate']), - use_template_config=dict(type='bool', aliases=['useTemplateConfig']), - use_template_scope=dict(type='bool', aliases=['useTemplateScope']), - use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), - authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']), - protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), - authorization_settings=dict(type='dict', aliases=['authorizationSettings']), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['client_id', 'id'], - 
['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
-
- result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
-
- # Obtain access token, initialize API
- try:
- connection_header = get_token(module.params)
- except KeycloakError as e:
- module.fail_json(msg=str(e))
-
- kc = KeycloakAPI(module, connection_header)
-
- realm = module.params.get('realm')
- cid = module.params.get('id')
- state = module.params.get('state')
-
- # Filter and map the parameter names that apply to the client
- client_params = [x for x in module.params
- if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
- module.params.get(x) is not None]
-
- # See if it already exists in Keycloak
- if cid is None:
- before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
- if before_client is not None:
- cid = before_client['id']
- else:
- before_client = kc.get_client_by_id(cid, realm=realm)
-
- if before_client is None:
- before_client = {}
-
- # Build a proposed changeset from parameters given to this module
- changeset = {}
-
- for client_param in client_params:
- new_param_value = module.params.get(client_param)
-
- # Some lists in the Keycloak API are sorted, some are not.
- if isinstance(new_param_value, list):
- if client_param in ['attributes']:
- try:
- new_param_value = sorted(new_param_value)
- except TypeError:
- pass
- # Unfortunately, the Ansible argument spec checker introduces variables with null values when
- # they are not specified.
- if client_param == 'protocol_mappers':
- new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
-
- changeset[camel(client_param)] = new_param_value
-
- # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
- desired_client = before_client.copy()
- desired_client.update(changeset)
-
- result['proposed'] = sanitize_cr(changeset)
- result['existing'] = sanitize_cr(before_client)
-
- # Cater for when it doesn't exist (an empty dict)
- if not before_client:
- if state == 'absent':
- # Do nothing and exit
- if module._diff:
- result['diff'] = dict(before='', after='')
- result['changed'] = False
- result['end_state'] = {}
- result['msg'] = 'Client does not exist; doing nothing.'
- module.exit_json(**result)
-
- # Process a creation
- result['changed'] = True
-
- if 'clientId' not in desired_client:
- module.fail_json(msg='client_id needs to be specified when creating a new client')
-
- if module._diff:
- result['diff'] = dict(before='', after=sanitize_cr(desired_client))
-
- if module.check_mode:
- module.exit_json(**result)
-
- # create it
- kc.create_client(desired_client, realm=realm)
- after_client = kc.get_client_by_clientid(desired_client['clientId'], realm=realm)
-
- result['end_state'] = sanitize_cr(after_client)
-
- result['msg'] = 'Client %s has been created.'
% desired_client['clientId'] - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - result['changed'] = True - - if module.check_mode: - # We can only compare the current client with the proposed updates we have - before_norm = normalise_cr(before_client, remove_ids=True) - desired_norm = normalise_cr(desired_client, remove_ids=True) - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_norm), - after=sanitize_cr(desired_norm)) - result['changed'] = (before_norm != desired_norm) - - module.exit_json(**result) - - # do the update - kc.update_client(cid, desired_client, realm=realm) - - after_client = kc.get_client_by_id(cid, realm=realm) - if before_client == after_client: - result['changed'] = False - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), - after=sanitize_cr(after_client)) - - result['end_state'] = sanitize_cr(after_client) - - result['msg'] = 'Client %s has been updated.' % desired_client['clientId'] - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_client(cid, realm=realm) - result['proposed'] = {} - - result['end_state'] = {} - - result['msg'] = 'Client %s has been deleted.' % before_client['clientId'] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py deleted file mode 100644 index b7cd70c1..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py +++ /dev/null @@ -1,350 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_client_rolemapping - -short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API - -version_added: 3.5.0 - -description: - - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup - to the API to translate the name into the role ID. - - -options: - state: - description: - - State of the client_rolemapping. 
- - On C(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
- - On C(absent), the client_rolemapping will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
-
- realm:
- type: str
- description:
- - The Keycloak realm under which this role_representation resides.
- default: 'master'
-
- group_name:
- type: str
- description:
- - Name of the group to be mapped.
- - This parameter is required (can be replaced by I(gid) to reduce the number of API calls).
-
- gid:
- type: str
- description:
- - Id of the group to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but
- providing it will reduce the number of API calls required.
-
- client_id:
- type: str
- description:
- - Name of the client to be mapped (different from I(cid)).
- - This parameter is required (can be replaced by I(cid) to reduce the number of API calls).
-
- cid:
- type: str
- description:
- - Id of the client to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but
- providing it will reduce the number of API calls required.
-
- roles:
- description:
- - Roles to be mapped to the group.
- type: list
- elements: dict
- suboptions:
- name:
- type: str
- description:
- - Name of the role_representation.
- - This parameter is required only when creating or updating the role_representation.
- id:
- type: str
- description:
- - The unique identifier for this role_representation.
- - This parameter is not required for updating or deleting a role_representation but
- providing it will reduce the number of API calls required.
-
-extends_documentation_fragment:
-- community.general.keycloak
-
-
-author:
- - Gaëtan Daubresse (@Gaetan2907)
-'''
-
-EXAMPLES = '''
-- name: Map a client role to a group, authentication with credentials
- community.general.keycloak_client_rolemappings:
- realm: MyCustomRealm
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- state: present
- client_id: client1
- group_name: group1
- roles:
- - name: role_name1
- id: role_id1
- - name: role_name2
- id: role_id2
- delegate_to: localhost
-
-- name: Map a client role to a group, authentication with token
- community.general.keycloak_client_rolemappings:
- realm: MyCustomRealm
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- token: TOKEN
- state: present
- client_id: client1
- group_name: group1
- roles:
- - name: role_name1
- id: role_id1
- - name: role_name2
- id: role_id2
- delegate_to: localhost
-
-- name: Unmap client role from a group
- community.general.keycloak_client_rolemappings:
- realm: MyCustomRealm
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- state: absent
- client_id: client1
- group_name: group1
- roles:
- - name: role_name1
- id: role_id1
- - name: role_name2
- id: role_id2
- delegate_to: localhost
-
-'''
-
-RETURN = '''
-msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Role role1 assigned to group group1."
-
-proposed:
- description: Representation of proposed client role mapping.
- returned: always
- type: dict
- sample: {
- clientId: "test"
- }
-
-existing:
- description:
- - Representation of existing client role mapping.
- - The sample is truncated.
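(Aside for readers of this diff: several option descriptions above note that passing I(gid)/I(cid) saves API round-trips. The fallback the module implements further down can be summarised as the sketch below; kc stands for a KeycloakAPI instance, and only methods already used in this file appear.)

    def resolve_ids(kc, realm, gid=None, group_name=None, cid=None, client_id=None):
        # Use caller-supplied IDs as-is; otherwise spend one extra API
        # call per missing ID to translate name -> id.
        if gid is None:
            group = kc.get_group_by_name(group_name, realm=realm)
            gid = group["id"] if group is not None else None
        if cid is None:
            cid = kc.get_client_id(client_id, realm=realm)
        return gid, cid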
- returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: - - Representation of client role mapping after module execution. - - The sample is truncated. - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError, is_struct_included -from ansible.module_utils.basic import AnsibleModule - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - roles_spec = dict( - name=dict(type='str'), - id=dict(type='str'), - ) - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(default='master'), - gid=dict(type='str'), - group_name=dict(type='str'), - cid=dict(type='str'), - client_id=dict(type='str'), - roles=dict(type='list', elements='dict', options=roles_spec), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - cid = module.params.get('cid') - client_id = module.params.get('client_id') - gid = module.params.get('gid') - group_name = module.params.get('group_name') - roles = module.params.get('roles') - - # Check the parameters - if cid is None and client_id is None: - module.fail_json(msg='Either the `client_id` or `cid` has to be specified.') - if gid is None and group_name is None: - module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') - - # Get the potential missing parameters - if gid is None: - group_rep = kc.get_group_by_name(group_name, realm=realm) - if group_rep is not None: - gid = group_rep['id'] - else: - module.fail_json(msg='Could not fetch group %s:' % group_name) - if cid is None: - cid = kc.get_client_id(client_id, realm=realm) - if cid is None: - module.fail_json(msg='Could not fetch client %s:' % client_id) - if roles is None: - module.exit_json(msg="Nothing to do (no roles specified).") - else: - for role_index, role in enumerate(roles, start=0): - if role['name'] is None and role['id'] is None: - module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') - # Fetch missing role_id - if role['id'] is None: - role_id = kc.get_client_role_by_name(gid, cid, role['name'], realm=realm) - if role_id is not None: - role['id'] = role_id - else: - module.fail_json(msg='Could not fetch role %s:' % (role['name'])) - # Fetch missing role_name - else: - role['name'] = kc.get_client_rolemapping_by_id(gid, cid, role['id'], realm=realm)['name'] - if role['name'] is None: - module.fail_json(msg='Could not fetch role %s' % (role['id'])) - - # Get effective client-level role mappings - available_roles_before = kc.get_client_available_rolemappings(gid, 
cid, realm=realm) - assigned_roles_before = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - - result['existing'] = assigned_roles_before - result['proposed'] = roles - - update_roles = [] - for role_index, role in enumerate(roles, start=0): - # Fetch roles to assign if state present - if state == 'present': - for available_role in available_roles_before: - if role['name'] == available_role['name']: - update_roles.append({ - 'id': role['id'], - 'name': role['name'], - }) - # Fetch roles to remove if state absent - else: - for assigned_role in assigned_roles_before: - if role['name'] == assigned_role['name']: - update_roles.append({ - 'id': role['id'], - 'name': role['name'], - }) - - if len(update_roles): - if state == 'present': - # Assign roles - result['changed'] = True - if module._diff: - result['diff'] = dict(before=assigned_roles_before, after=update_roles) - if module.check_mode: - module.exit_json(**result) - kc.add_group_rolemapping(gid, cid, update_roles, realm=realm) - result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name) - assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - result['end_state'] = assigned_roles_after - module.exit_json(**result) - else: - # Remove mapping of role - result['changed'] = True - if module._diff: - result['diff'] = dict(before=assigned_roles_before, after=update_roles) - if module.check_mode: - module.exit_json(**result) - kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm) - result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name) - assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - result['end_state'] = assigned_roles_after - module.exit_json(**result) - # Do nothing - else: - result['changed'] = False - result['msg'] = 'Nothing to do, roles %s are correctly mapped with group %s.' % (roles, group_name) - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clientscope.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clientscope.py deleted file mode 100644 index 2deab554..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clientscope.py +++ /dev/null @@ -1,499 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_clientscope - -short_description: Allows administration of Keycloak client_scopes via Keycloak API - -version_added: 3.4.0 - -description: - - This module allows you to add, remove or modify Keycloak client_scopes via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. 
All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup - to the API to translate the name into the client_scope ID. - - -options: - state: - description: - - State of the client_scope. - - On C(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the client_scope will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - name: - type: str - description: - - Name of the client_scope. - - This parameter is required only when creating or updating the client_scope. - - realm: - type: str - description: - - They Keycloak realm under which this client_scope resides. - default: 'master' - - id: - type: str - description: - - The unique identifier for this client_scope. - - This parameter is not required for updating or deleting a client_scope but - providing it will reduce the number of API calls required. - - description: - type: str - description: - - Description for this client_scope. - - This parameter is not required for updating or deleting a client_scope. - - protocol: - description: - - Type of client. - choices: ['openid-connect', 'saml', 'wsfed'] - type: str - - protocol_mappers: - description: - - A list of dicts defining protocol mappers for this client. - - This is 'protocolMappers' in the Keycloak REST API. - aliases: - - protocolMappers - type: list - elements: dict - suboptions: - protocol: - description: - - This specifies for which protocol this protocol mapper. - - is active. - choices: ['openid-connect', 'saml', 'wsfed'] - type: str - - protocolMapper: - description: - - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is - impossible to provide since this may be extended through SPIs by the user of Keycloak, - by default Keycloak as of 3.4 ships with at least:" - - C(docker-v2-allow-all-mapper) - - C(oidc-address-mapper) - - C(oidc-full-name-mapper) - - C(oidc-group-membership-mapper) - - C(oidc-hardcoded-claim-mapper) - - C(oidc-hardcoded-role-mapper) - - C(oidc-role-name-mapper) - - C(oidc-script-based-protocol-mapper) - - C(oidc-sha256-pairwise-sub-mapper) - - C(oidc-usermodel-attribute-mapper) - - C(oidc-usermodel-client-role-mapper) - - C(oidc-usermodel-property-mapper) - - C(oidc-usermodel-realm-role-mapper) - - C(oidc-usersessionmodel-note-mapper) - - C(saml-group-membership-mapper) - - C(saml-hardcode-attribute-mapper) - - C(saml-hardcode-role-mapper) - - C(saml-role-list-mapper) - - C(saml-role-name-mapper) - - C(saml-user-attribute-mapper) - - C(saml-user-property-mapper) - - C(saml-user-session-note-mapper) - - An exhaustive list of available mappers on your installation can be obtained on - the admin console by going to Server Info -> Providers and looking under - 'protocol-mapper'. - type: str - - name: - description: - - The name of this protocol mapper. - type: str - - id: - description: - - Usually a UUID specifying the internal ID of this protocol mapper instance. 
- type: str - - config: - description: - - Dict specifying the configuration options for the protocol mapper; the - contents differ depending on the value of I(protocolMapper) and are not documented - other than by the source of the mappers and its parent class(es). An example is given - below. It is easiest to obtain valid config values by dumping an already-existing - protocol mapper configuration through check-mode in the C(existing) return value. - type: dict - - attributes: - type: dict - description: - - A dict of key/value pairs to set as custom attributes for the client_scope. - - Values may be single values (for example a string) or a list of strings. - -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Gaëtan Daubresse (@Gaetan2907) -''' - -EXAMPLES = ''' -- name: Create a Keycloak client_scopes, authentication with credentials - community.general.keycloak_clientscope: - name: my-new-kc-clientscope - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a Keycloak client_scopes, authentication with token - community.general.keycloak_clientscope: - name: my-new-kc-clientscope - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - token: TOKEN - delegate_to: localhost - -- name: Delete a keycloak client_scopes - community.general.keycloak_clientscope: - id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' - state: absent - realm: MyCustomRealm - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Delete a Keycloak client_scope based on name - community.general.keycloak_clientscope: - name: my-clientscope-for-deletion - state: absent - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Update the name of a Keycloak client_scope - community.general.keycloak_clientscope: - id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' - name: an-updated-kc-clientscope-name - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a Keycloak client_scope with some custom attributes - community.general.keycloak_clientscope: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - name: my-new_clientscope - description: description-of-clientscope - protocol: openid-connect - protocol_mappers: - - config: - access.token.claim: True - claim.name: "family_name" - id.token.claim: True - jsonType.label: String - user.attribute: lastName - userinfo.token.claim: True - name: family name - protocol: openid-connect - protocolMapper: oidc-usermodel-property-mapper - - config: - attribute.name: Role - attribute.nameformat: Basic - single: false - name: role list - protocol: saml - protocolMapper: saml-role-list-mapper - attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - with - - numerous - - individual - - list - - items - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. 
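(Aside for readers of this diff: the DOCUMENTATION above states that single attribute values are accepted and "translated into a list suitable for the API". A hypothetical sketch of that normalisation, for illustration only:)

    # Hypothetical scalar-to-list translation for the 'attributes' dict.
    def listify_attributes(attributes):
        return {
            key: value if isinstance(value, list) else [value]
            for key, value in attributes.items()
        }

    print(listify_attributes({"attrib1": "value1", "attrib3": ["a", "b"]}))
    # {'attrib1': ['value1'], 'attrib3': ['a', 'b']}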
- returned: always - type: str - sample: "Client_scope testclientscope has been updated" - -proposed: - description: Representation of proposed client scope. - returned: always - type: dict - sample: { - clientId: "test" - } - -existing: - description: Representation of existing client scope (sample is truncated). - returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: Representation of client scope after module execution (sample is truncated). - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError, is_struct_included -from ansible.module_utils.basic import AnsibleModule - - -def sanitize_cr(clientscoperep): - """ Removes probably sensitive details from a clientscoperep representation. - - :param clientscoperep: the clientscoperep dict to be sanitized - :return: sanitized clientrep dict - """ - result = clientscoperep.copy() - if 'secret' in result: - result['secret'] = 'no_log' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes']['saml.signing.private.key'] = 'no_log' - return result - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - protmapper_spec = dict( - id=dict(type='str'), - name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), - protocolMapper=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(default='master'), - id=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), - attributes=dict(type='dict'), - protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - cid = module.params.get('id') - name = module.params.get('name') - protocol_mappers = module.params.get('protocol_mappers') - - # Filter and map the parameters names that apply to the client scope - clientscope_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - if cid is None: - before_clientscope = kc.get_clientscope_by_name(name, realm=realm) - else: - before_clientscope = kc.get_clientscope_by_clientscopeid(cid, realm=realm) - - if before_clientscope is None: - before_clientscope = {} - - # Build a proposed changeset 
from parameters given to this module - changeset = {} - - for clientscope_param in clientscope_params: - new_param_value = module.params.get(clientscope_param) - - # some lists in the Keycloak API are sorted, some are not. - if isinstance(new_param_value, list): - if clientscope_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass - # Unfortunately, the ansible argument spec checker introduces variables with null values when - # they are not specified - if clientscope_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] - changeset[camel(clientscope_param)] = new_param_value - - # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) - desired_clientscope = before_clientscope.copy() - desired_clientscope.update(changeset) - - # Cater for when it doesn't exist (an empty dict) - if not before_clientscope: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Clientscope does not exist; doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if name is None: - module.fail_json(msg='name must be specified when creating a new clientscope') - - if module._diff: - result['diff'] = dict(before='', after=sanitize_cr(desired_clientscope)) - - if module.check_mode: - module.exit_json(**result) - - # create it - kc.create_clientscope(desired_clientscope, realm=realm) - after_clientscope = kc.get_clientscope_by_name(name, realm) - - result['end_state'] = sanitize_cr(after_clientscope) - - result['msg'] = 'Clientscope {name} has been created with ID {id}'.format(name=after_clientscope['name'], - id=after_clientscope['id']) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_clientscope == before_clientscope: - result['changed'] = False - result['end_state'] = sanitize_cr(desired_clientscope) - result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name']) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope)) - - if module.check_mode: - module.exit_json(**result) - - # do the update - kc.update_clientscope(desired_clientscope, realm=realm) - - # do the protocolmappers update - if protocol_mappers is not None: - for protocol_mapper in protocol_mappers: - # update if protocolmapper exist - current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(desired_clientscope['id'], protocol_mapper['name'], realm=realm) - if current_protocolmapper is not None: - protocol_mapper['id'] = current_protocolmapper['id'] - kc.update_clientscope_protocolmappers(desired_clientscope['id'], protocol_mapper, realm=realm) - # create otherwise - else: - kc.create_clientscope_protocolmapper(desired_clientscope['id'], protocol_mapper, realm=realm) - - after_clientscope = kc.get_clientscope_by_clientscopeid(desired_clientscope['id'], realm=realm) - - result['end_state'] = after_clientscope - - result['msg'] = "Clientscope {id} has been updated".format(id=after_clientscope['id']) - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = 
dict(before=sanitize_cr(before_clientscope), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - cid = before_clientscope['id'] - kc.delete_clientscope(cid=cid, realm=realm) - - result['end_state'] = {} - - result['msg'] = "Clientscope {name} has been deleted".format(name=before_clientscope['name']) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py deleted file mode 100644 index cec7c93d..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py +++ /dev/null @@ -1,449 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_clienttemplate - -short_description: Allows administration of Keycloak client templates via Keycloak API - - -description: - - This module allows the administration of Keycloak client templates via the Keycloak REST API. It - requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html) - - - The Keycloak API does not always enforce for only sensible settings to be used -- you can set - SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. - If you do not specify a setting, usually a sensible default is chosen. - -options: - state: - description: - - State of the client template. - - On C(present), the client template will be created (or updated if it exists already). - - On C(absent), the client template will be removed if it exists - choices: ['present', 'absent'] - default: 'present' - type: str - - id: - description: - - Id of client template to be worked on. This is usually a UUID. - type: str - - realm: - description: - - Realm this client template is found in. - type: str - default: master - - name: - description: - - Name of the client template. - type: str - - description: - description: - - Description of the client template in Keycloak. - type: str - - protocol: - description: - - Type of client template (either C(openid-connect) or C(saml). - choices: ['openid-connect', 'saml'] - type: str - - full_scope_allowed: - description: - - Is the "Full Scope Allowed" feature set for this client template or not. - This is 'fullScopeAllowed' in the Keycloak REST API. - type: bool - - protocol_mappers: - description: - - a list of dicts defining protocol mappers for this client template. - This is 'protocolMappers' in the Keycloak REST API. - type: list - elements: dict - suboptions: - consentRequired: - description: - - Specifies whether a user needs to provide consent to a client for this mapper to be active. 
- type: bool
-
- consentText:
- description:
- - The human-readable name of the consent the user is presented to accept.
- type: str
-
- id:
- description:
- - Usually a UUID specifying the internal ID of this protocol mapper instance.
- type: str
-
- name:
- description:
- - The name of this protocol mapper.
- type: str
-
- protocol:
- description:
- - This is either C(openid-connect) or C(saml); it specifies for which protocol this protocol mapper
- is active.
- choices: ['openid-connect', 'saml']
- type: str
-
- protocolMapper:
- description:
- - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
- impossible to provide since this may be extended through SPIs by the user of Keycloak,
- by default Keycloak as of 3.4 ships with at least:
- - C(docker-v2-allow-all-mapper)
- - C(oidc-address-mapper)
- - C(oidc-full-name-mapper)
- - C(oidc-group-membership-mapper)
- - C(oidc-hardcoded-claim-mapper)
- - C(oidc-hardcoded-role-mapper)
- - C(oidc-role-name-mapper)
- - C(oidc-script-based-protocol-mapper)
- - C(oidc-sha256-pairwise-sub-mapper)
- - C(oidc-usermodel-attribute-mapper)
- - C(oidc-usermodel-client-role-mapper)
- - C(oidc-usermodel-property-mapper)
- - C(oidc-usermodel-realm-role-mapper)
- - C(oidc-usersessionmodel-note-mapper)
- - C(saml-group-membership-mapper)
- - C(saml-hardcode-attribute-mapper)
- - C(saml-hardcode-role-mapper)
- - C(saml-role-list-mapper)
- - C(saml-role-name-mapper)
- - C(saml-user-attribute-mapper)
- - C(saml-user-property-mapper)
- - C(saml-user-session-note-mapper)
- - An exhaustive list of available mappers on your installation can be obtained on
- the admin console by going to Server Info -> Providers and looking under
- 'protocol-mapper'.
- type: str
-
- config:
- description:
- - Dict specifying the configuration options for the protocol mapper; the
- contents differ depending on the value of I(protocolMapper) and are not documented
- other than by the source of the mappers and its parent class(es). An example is given
- below. It is easiest to obtain valid config values by dumping an already-existing
- protocol mapper configuration through check-mode in the I(existing) field.
- type: dict
-
- attributes:
- description:
- - A dict of further attributes for this client template. This can contain various
- configuration settings, though in the default installation of Keycloak as of 3.4, none
- are documented or known, so this is usually empty.
- type: dict
-
-notes:
-- The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled),
- I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and
- I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
- Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such,
- they are not available through this module.
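The check-mode tip in the I(config) description above is the practical way to discover valid mapper configuration values. A minimal sketch (a hypothetical playbook snippet reusing the credential placeholders from the examples below) that dumps an existing client template without changing anything:

- name: Inspect an existing client template in check mode
  community.general.keycloak_clienttemplate:
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: USERNAME
    auth_password: PASSWORD
    realm: master
    name: this_is_a_test
  check_mode: true
  register: clientt_info
  delegate_to: localhost

- name: Show the server-side representation returned in the existing field
  ansible.builtin.debug:
    var: clientt_info.existing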
-
-extends_documentation_fragment:
-- community.general.keycloak
-
-author:
- - Eike Frost (@eikef)
-'''
-
-EXAMPLES = '''
-- name: Create or update Keycloak client template (minimal), authentication with credentials
- community.general.keycloak_clienttemplate:
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- realm: master
- name: this_is_a_test
- delegate_to: localhost
-
-- name: Create or update Keycloak client template (minimal), authentication with token
- community.general.keycloak_clienttemplate:
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- token: TOKEN
- realm: master
- name: this_is_a_test
- delegate_to: localhost
-
-- name: Delete Keycloak client template
- community.general.keycloak_clienttemplate:
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- realm: master
- state: absent
- name: test01
- delegate_to: localhost
-
-- name: Create or update Keycloak client template (with a protocol mapper)
- community.general.keycloak_clienttemplate:
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- realm: master
- name: this_is_a_test
- protocol_mappers:
- - config:
- access.token.claim: True
- claim.name: "family_name"
- id.token.claim: True
- jsonType.label: String
- user.attribute: lastName
- userinfo.token.claim: True
- consentRequired: True
- consentText: "${familyName}"
- name: family name
- protocol: openid-connect
- protocolMapper: oidc-usermodel-property-mapper
- full_scope_allowed: false
- id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f
- delegate_to: localhost
-'''
-
-RETURN = '''
-msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Client template testclient has been updated"
-
-proposed:
- description: Representation of proposed client template.
- returned: always
- type: dict
- sample: {
- name: "test01"
- }
-
-existing:
- description: Representation of existing client template (sample is truncated).
- returned: always
- type: dict
- sample: {
- "description": "test01",
- "fullScopeAllowed": false,
- "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
- "name": "test01",
- "protocol": "saml"
- }
-
-end_state:
- description: Representation of client template after module execution (sample is truncated).
- returned: on success
- type: dict
- sample: {
- "description": "test01",
- "fullScopeAllowed": false,
- "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
- "name": "test01",
- "protocol": "saml"
- }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
- keycloak_argument_spec, get_token, KeycloakError
-from ansible.module_utils.basic import AnsibleModule
-
-
-def main():
- """
- Module execution
-
- :return:
- """
- argument_spec = keycloak_argument_spec()
-
- protmapper_spec = dict(
- consentRequired=dict(type='bool'),
- consentText=dict(type='str'),
- id=dict(type='str'),
- name=dict(type='str'),
- protocol=dict(type='str', choices=['openid-connect', 'saml']),
- protocolMapper=dict(type='str'),
- config=dict(type='dict'),
- )
-
- meta_args = dict(
- realm=dict(type='str', default='master'),
- state=dict(default='present', choices=['present', 'absent']),
-
- id=dict(type='str'),
- name=dict(type='str'),
- description=dict(type='str'),
- protocol=dict(type='str', choices=['openid-connect', 'saml']),
- attributes=dict(type='dict'),
- full_scope_allowed=dict(type='bool'),
- protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec),
- )
-
- argument_spec.update(meta_args)
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
-
- result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
-
- # Obtain access token, initialize API
- try:
- connection_header = get_token(module.params)
- except KeycloakError as e:
- module.fail_json(msg=str(e))
-
- kc = KeycloakAPI(module, connection_header)
-
- realm = module.params.get('realm')
- state = module.params.get('state')
- cid = module.params.get('id')
-
- # Filter and map the parameter names that apply to the client template
- clientt_params = [x for x in module.params
- if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm',
- 'auth_client_secret', 'auth_username', 'auth_password',
- 'validate_certs', 'realm'] and module.params.get(x) is not None]
-
- # See if it already exists in Keycloak
- if cid is None:
- before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm)
- if before_clientt is not None:
- cid = before_clientt['id']
- else:
- before_clientt = kc.get_client_template_by_id(cid, realm=realm)
-
- if before_clientt is None:
- before_clientt = {}
-
- result['existing'] = before_clientt
-
- # Build a proposed changeset from parameters given to this module
- changeset = {}
-
- for clientt_param in clientt_params:
- # lists in the Keycloak API are sorted
- new_param_value = module.params.get(clientt_param)
- if isinstance(new_param_value, list):
- try:
- new_param_value = sorted(new_param_value)
- except TypeError:
- pass
- changeset[camel(clientt_param)] = new_param_value
-
- # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
- desired_clientt = before_clientt.copy()
- desired_clientt.update(changeset)
-
- result['proposed'] = changeset
-
- # Cater for when it doesn't exist (an empty dict)
- if not before_clientt:
- if state == 'absent':
- # Do nothing and exit
- if module._diff:
- result['diff'] = dict(before='', after='')
- result['changed'] = False
- result['end_state'] = {}
- result['msg'] = 'Client template does not exist, doing nothing.'
- module.exit_json(**result)
-
- # Process a creation
- result['changed'] = True
-
- if 'name' not in desired_clientt:
- module.fail_json(msg='name needs to be specified when creating a new client template')
-
- if module._diff:
- result['diff'] = dict(before='', after=desired_clientt)
-
- if module.check_mode:
- module.exit_json(**result)
-
- # create it
- kc.create_client_template(desired_clientt, realm=realm)
- after_clientt = kc.get_client_template_by_name(desired_clientt['name'], realm=realm)
-
- result['end_state'] = after_clientt
-
- result['msg'] = 'Client template %s has been created.' % desired_clientt['name']
- module.exit_json(**result)
-
- else:
- if state == 'present':
- # Process an update
-
- result['changed'] = True
- if module.check_mode:
- # We can only compare the current client template with the proposed updates we have
- if module._diff:
- result['diff'] = dict(before=before_clientt,
- after=desired_clientt)
-
- module.exit_json(**result)
-
- # do the update
- kc.update_client_template(cid, desired_clientt, realm=realm)
-
- after_clientt = kc.get_client_template_by_id(cid, realm=realm)
- if before_clientt == after_clientt:
- result['changed'] = False
-
- result['end_state'] = after_clientt
-
- if module._diff:
- result['diff'] = dict(before=before_clientt, after=after_clientt)
-
- result['msg'] = 'Client template %s has been updated.' % desired_clientt['name']
- module.exit_json(**result)
-
- else:
- # Process a deletion (because state was not 'present')
- result['changed'] = True
-
- if module._diff:
- result['diff'] = dict(before=before_clientt, after='')
-
- if module.check_mode:
- module.exit_json(**result)
-
- # delete it
- kc.delete_client_template(cid, realm=realm)
- result['proposed'] = {}
-
- result['end_state'] = {}
-
- result['msg'] = 'Client template %s has been deleted.' % before_clientt['name']
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py
deleted file mode 100644
index 3455f578..00000000
--- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py
+++ /dev/null
@@ -1,440 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2019, Adam Goossens
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: keycloak_group
-
-short_description: Allows administration of Keycloak groups via Keycloak API
-
-description:
- - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
- - When updating a group, where possible provide the group ID to the module. This removes a lookup
- to the API to translate the name into the group ID.
-
-
-options:
- state:
- description:
- - State of the group.
- - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
- - On C(absent), the group will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
-
- name:
- type: str
- description:
- - Name of the group.
- - This parameter is required only when creating or updating the group.
-
- realm:
- type: str
- description:
- - The Keycloak realm under which this group resides.
- default: 'master'
-
- id:
- type: str
- description:
- - The unique identifier for this group.
- - This parameter is not required for updating or deleting a group but
- providing it will reduce the number of API calls required.
-
- attributes:
- type: dict
- description:
- - A dict of key/value pairs to set as custom attributes for the group.
- - Values may be single values (e.g. a string) or a list of strings.
-
-notes:
- - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
- are read-only for groups. This limitation will be removed in a later version of this module.
-
-extends_documentation_fragment:
-- community.general.keycloak
-
-
-author:
- - Adam Goossens (@adamgoossens)
-'''
-
-EXAMPLES = '''
-- name: Create a Keycloak group, authentication with credentials
- community.general.keycloak_group:
- name: my-new-kc-group
- realm: MyCustomRealm
- state: present
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- delegate_to: localhost
-
-- name: Create a Keycloak group, authentication with token
- community.general.keycloak_group:
- name: my-new-kc-group
- realm: MyCustomRealm
- state: present
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- token: TOKEN
- delegate_to: localhost
-
-- name: Delete a Keycloak group
- community.general.keycloak_group:
- id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
- state: absent
- realm: MyCustomRealm
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- delegate_to: localhost
-
-- name: Delete a Keycloak group based on name
- community.general.keycloak_group:
- name: my-group-for-deletion
- state: absent
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- delegate_to: localhost
-
-- name: Update the name of a Keycloak group
- community.general.keycloak_group:
- id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
- name: an-updated-kc-group-name
- state: present
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- delegate_to: localhost
-
-- name: Create a Keycloak group with some custom attributes
- community.general.keycloak_group:
- auth_client_id: admin-cli
- auth_keycloak_url: https://auth.example.com/auth
- auth_realm: master
- auth_username: USERNAME
- auth_password: PASSWORD
- name: my-new_group
- attributes:
- attrib1: value1
- attrib2: value2
- attrib3:
- - with
- - numerous
- - 
individual - - list - - items - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - -end_state: - description: Representation of the group after module execution (sample is truncated). - returned: on success - type: complex - contains: - id: - description: GUID that identifies the group. - type: str - returned: always - sample: 23f38145-3195-462c-97e7-97041ccea73e - name: - description: Name of the group. - type: str - returned: always - sample: grp-test-123 - attributes: - description: Attributes applied to this group. - type: dict - returned: always - sample: - attr1: ["val1", "val2", "val3"] - path: - description: URI path to the group. - type: str - returned: always - sample: /grp-test-123 - realmRoles: - description: An array of the realm-level roles granted to this group. - type: list - returned: always - sample: [] - subGroups: - description: A list of groups that are children of this group. These groups will have the same parameters as - documented here. - type: list - returned: always - clientRoles: - description: A list of client-level roles granted to this group. - type: list - returned: always - sample: [] - access: - description: A dict describing the accesses you have to this group based on the credentials used. - type: dict - returned: always - sample: - manage: true - manageMembership: true - view: true - -group: - description: - - Representation of the group after module execution. - - Deprecated return value, it will be removed in community.general 6.0.0. Please use the return value I(end_state) instead. - returned: always - type: complex - contains: - id: - description: GUID that identifies the group. - type: str - returned: always - sample: 23f38145-3195-462c-97e7-97041ccea73e - name: - description: Name of the group. - type: str - returned: always - sample: grp-test-123 - attributes: - description: Attributes applied to this group. - type: dict - returned: always - sample: - attr1: ["val1", "val2", "val3"] - path: - description: URI path to the group. - type: str - returned: always - sample: /grp-test-123 - realmRoles: - description: An array of the realm-level roles granted to this group. - type: list - returned: always - sample: [] - subGroups: - description: A list of groups that are children of this group. These groups will have the same parameters as - documented here. - type: list - returned: always - clientRoles: - description: A list of client-level roles granted to this group. - type: list - returned: always - sample: [] - access: - description: A dict describing the accesses you have to this group based on the credentials used. 
- type: dict
- returned: always
- sample:
- manage: true
- manageMembership: true
- view: true
-
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
- keycloak_argument_spec, get_token, KeycloakError
-from ansible.module_utils.basic import AnsibleModule
-
-
-def main():
- """
- Module execution
-
- :return:
- """
- argument_spec = keycloak_argument_spec()
-
- meta_args = dict(
- state=dict(default='present', choices=['present', 'absent']),
- realm=dict(default='master'),
- id=dict(type='str'),
- name=dict(type='str'),
- attributes=dict(type='dict'),
- )
-
- argument_spec.update(meta_args)
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
-
- result = dict(changed=False, msg='', diff={}, group='')
-
- # Obtain access token, initialize API
- try:
- connection_header = get_token(module.params)
- except KeycloakError as e:
- module.fail_json(msg=str(e))
-
- kc = KeycloakAPI(module, connection_header)
-
- realm = module.params.get('realm')
- state = module.params.get('state')
- gid = module.params.get('id')
- name = module.params.get('name')
- attributes = module.params.get('attributes')
-
- # attributes in Keycloak have their values returned as lists
- # via the API. attributes is a dict, so we'll transparently convert
- # the values to lists.
- if attributes is not None:
- for key, val in module.params['attributes'].items():
- module.params['attributes'][key] = [val] if not isinstance(val, list) else val
-
- # Filter and map the parameter names that apply to the group
- group_params = [x for x in module.params
- if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
- module.params.get(x) is not None]
-
- # See if it already exists in Keycloak
- if gid is None:
- before_group = kc.get_group_by_name(name, realm=realm)
- else:
- before_group = kc.get_group_by_groupid(gid, realm=realm)
-
- if before_group is None:
- before_group = {}
-
- # Build a proposed changeset from parameters given to this module
- changeset = {}
-
- for param in group_params:
- new_param_value = module.params.get(param)
- old_value = before_group[param] if param in before_group else None
- if new_param_value != old_value:
- changeset[camel(param)] = new_param_value
-
- # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
- desired_group = before_group.copy()
- desired_group.update(changeset)
-
- # Cater for when it doesn't exist (an empty dict)
- if not before_group:
- if state == 'absent':
- # Do nothing and exit
- if module._diff:
- result['diff'] = dict(before='', after='')
- result['changed'] = False
- result['end_state'] = {}
- result['group'] = result['end_state']
- result['msg'] = 'Group does not exist; doing nothing.'
- module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if name is None: - module.fail_json(msg='name must be specified when creating a new group') - - if module._diff: - result['diff'] = dict(before='', after=desired_group) - - if module.check_mode: - module.exit_json(**result) - - # create it - kc.create_group(desired_group, realm=realm) - after_group = kc.get_group_by_name(name, realm) - - result['end_state'] = after_group - result['group'] = result['end_state'] - - result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'], - id=after_group['id']) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_group == before_group: - result['changed'] = False - result['end_state'] = desired_group - result['group'] = result['end_state'] - result['msg'] = "No changes required to group {name}.".format(name=before_group['name']) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=before_group, after=desired_group) - - if module.check_mode: - module.exit_json(**result) - - # do the update - kc.update_group(desired_group, realm=realm) - - after_group = kc.get_group_by_groupid(desired_group['id'], realm=realm) - - result['end_state'] = after_group - result['group'] = result['end_state'] - - result['msg'] = "Group {id} has been updated".format(id=after_group['id']) - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=before_group, after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - gid = before_group['id'] - kc.delete_group(groupid=gid, realm=realm) - - result['end_state'] = {} - result['group'] = result['end_state'] - - result['msg'] = "Group {name} has been deleted".format(name=before_group['name']) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_identity_provider.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_identity_provider.py deleted file mode 100644 index a4adddd9..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_identity_provider.py +++ /dev/null @@ -1,646 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_identity_provider - -short_description: Allows administration of Keycloak identity providers via Keycloak API - -version_added: 3.6.0 - -description: - - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). 
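As a small illustration of the snake_case/camelCase convention described above, the two tasks below are equivalent; the camelCase spellings are accepted as aliases (a sketch with hypothetical values, based on the aliases declared in this module's argument spec):

- name: Configure an identity provider using snake_cased option names
  community.general.keycloak_identity_provider:
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: admin
    realm: myrealm
    alias: oidc-idp
    display_name: OpenID Connect IdP
    store_token: false

- name: The same task written with the camelCase aliases
  community.general.keycloak_identity_provider:
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: admin
    realm: myrealm
    alias: oidc-idp
    displayName: OpenID Connect IdP
    storeToken: false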
- - -options: - state: - description: - - State of the identity provider. - - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the identity provider will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - realm: - description: - - The Keycloak realm under which this identity provider resides. - default: 'master' - type: str - - alias: - description: - - The alias uniquely identifies an identity provider and it is also used to build the redirect URI. - required: true - type: str - - display_name: - description: - - Friendly name for identity provider. - aliases: - - displayName - type: str - - enabled: - description: - - Enable/disable this identity provider. - type: bool - - store_token: - description: - - Enable/disable whether tokens must be stored after authenticating users. - aliases: - - storeToken - type: bool - - add_read_token_role_on_create: - description: - - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role. - aliases: - - addReadTokenRoleOnCreate - type: bool - - trust_email: - description: - - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. - aliases: - - trustEmail - type: bool - - link_only: - description: - - If true, users cannot log in through this provider. They can only link to this provider. - This is useful if you don't want to allow login from the provider, but want to integrate with a provider. - aliases: - - linkOnly - type: bool - - first_broker_login_flow_alias: - description: - - Alias of authentication flow, which is triggered after first login with this identity provider. - aliases: - - firstBrokerLoginFlowAlias - type: str - - post_broker_login_flow_alias: - description: - - Alias of authentication flow, which is triggered after each login with this identity provider. - aliases: - - postBrokerLoginFlowAlias - type: str - - authenticate_by_default: - description: - - Specifies if this identity provider should be used by default for authentication even before displaying login screen. - aliases: - - authenticateByDefault - type: bool - - provider_id: - description: - - Protocol used by this provider (supported values are C(oidc) or C(saml)). - aliases: - - providerId - type: str - - config: - description: - - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId). - Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing - identity provider configuration through check-mode in the I(existing) field. - type: dict - suboptions: - hide_on_login_page: - description: - - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter. - aliases: - - hideOnLoginPage - type: bool - - gui_order: - description: - - Number defining order of the provider in GUI (for example, on Login page). - aliases: - - guiOrder - type: int - - sync_mode: - description: - - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers. - aliases: - - syncMode - type: str - - issuer: - description: - - The issuer identifier for the issuer of the response. If not provided, no validation will be performed. - type: str - - authorizationUrl: - description: - - The Authorization URL. 
- type: str
-
- tokenUrl:
- description:
- - The Token URL.
- type: str
-
- logoutUrl:
- description:
- - End session endpoint to use to logout user from external IDP.
- type: str
-
- userInfoUrl:
- description:
- - The User Info URL.
- type: str
-
- clientAuthMethod:
- description:
- - The client authentication method.
- type: str
-
- clientId:
- description:
- - The client identifier registered within the identity provider.
- type: str
-
- clientSecret:
- description:
- - The client secret registered within the identity provider.
- type: str
-
- defaultScope:
- description:
- - The scopes to be sent when asking for authorization.
- type: str
-
- validateSignature:
- description:
- - Enable/disable signature validation of external IDP signatures.
- type: bool
-
- useJwksUrl:
- description:
- - If the switch is on, identity provider public keys will be downloaded from given JWKS URL.
- type: bool
-
- jwksUrl:
- description:
- - URL where identity provider keys in JWK format are stored. See JWK specification for more details.
- type: str
-
- entityId:
- description:
- - The Entity ID that will be used to uniquely identify this SAML Service Provider.
- type: str
-
- singleSignOnServiceUrl:
- description:
- - The URL that must be used to send authentication requests (SAML AuthnRequest).
- type: str
-
- singleLogoutServiceUrl:
- description:
- - The URL that must be used to send logout requests.
- type: str
-
- backchannelSupported:
- description:
- - Does the external IDP support backchannel logout?
- type: str
-
- nameIDPolicyFormat:
- description:
- - Specifies the URI reference corresponding to a name identifier format.
- type: str
-
- principalType:
- description:
- - Way to identify and track external users from the assertion.
- type: str
-
- mappers:
- description:
- - A list of dicts defining mappers associated with this Identity Provider.
- type: list
- elements: dict
- suboptions:
- id:
- description:
- - Unique ID of this mapper.
- type: str
-
- name:
- description:
- - Name of the mapper.
- type: str
-
- identityProviderAlias:
- description:
- - Alias of the identity provider for this mapper.
- type: str
-
- identityProviderMapper:
- description:
- - Type of mapper.
- type: str
-
- config:
- description:
- - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper).
- type: dict - -extends_documentation_fragment: -- community.general.keycloak - -author: - - Laurent Paumier (@laurpaum) -''' - -EXAMPLES = ''' -- name: Create OIDC identity provider, authentication with credentials - community.general.keycloak_identity_provider: - state: present - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: admin - auth_password: admin - realm: myrealm - alias: oidc-idp - display_name: OpenID Connect IdP - enabled: true - provider_id: oidc - config: - issuer: https://idp.example.com - authorizationUrl: https://idp.example.com/auth - tokenUrl: https://idp.example.com/token - userInfoUrl: https://idp.example.com/userinfo - clientAuthMethod: client_secret_post - clientId: my-client - clientSecret: secret - syncMode: FORCE - mappers: - - name: first_name - identityProviderMapper: oidc-user-attribute-idp-mapper - config: - claim: first_name - user.attribute: first_name - syncMode: INHERIT - - name: last_name - identityProviderMapper: oidc-user-attribute-idp-mapper - config: - claim: last_name - user.attribute: last_name - syncMode: INHERIT - -- name: Create SAML identity provider, authentication with credentials - community.general.keycloak_identity_provider: - state: present - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: admin - auth_password: admin - realm: myrealm - alias: saml-idp - display_name: SAML IdP - enabled: true - provider_id: saml - config: - entityId: https://auth.example.com/auth/realms/myrealm - singleSignOnServiceUrl: https://idp.example.com/login - wantAuthnRequestsSigned: true - wantAssertionsSigned: true - mappers: - - name: roles - identityProviderMapper: saml-user-attribute-idp-mapper - config: - user.attribute: roles - attribute.friendly.name: User Roles - attribute.name: roles - syncMode: INHERIT -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Identity provider my-idp has been created" - -proposed: - description: Representation of proposed identity provider. - returned: always - type: dict - sample: { - "config": { - "authorizationUrl": "https://idp.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "secret", - "issuer": "https://idp.example.com", - "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "providerId": "oidc" - } - -existing: - description: Representation of existing identity provider. - returned: always - type: dict - sample: { - "addReadTokenRoleOnCreate": false, - "alias": "my-idp", - "authenticateByDefault": false, - "config": { - "authorizationUrl": "https://old.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "**********", - "issuer": "https://old.example.com", - "syncMode": "FORCE", - "tokenUrl": "https://old.example.com/token", - "userInfoUrl": "https://old.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "enabled": true, - "firstBrokerLoginFlowAlias": "first broker login", - "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", - "linkOnly": false, - "providerId": "oidc", - "storeToken": false, - "trustEmail": false, - } - -end_state: - description: Representation of identity provider after module execution. 
- returned: on success
- type: dict
- sample: {
- "addReadTokenRoleOnCreate": false,
- "alias": "my-idp",
- "authenticateByDefault": false,
- "config": {
- "authorizationUrl": "https://idp.example.com/auth",
- "clientAuthMethod": "client_secret_post",
- "clientId": "my-client",
- "clientSecret": "**********",
- "issuer": "https://idp.example.com",
- "tokenUrl": "https://idp.example.com/token",
- "userInfoUrl": "https://idp.example.com/userinfo"
- },
- "displayName": "OpenID Connect IdP",
- "enabled": true,
- "firstBrokerLoginFlowAlias": "first broker login",
- "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
- "linkOnly": false,
- "providerId": "oidc",
- "storeToken": false,
- "trustEmail": false,
- }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
- keycloak_argument_spec, get_token, KeycloakError
-from ansible.module_utils.basic import AnsibleModule
-from copy import deepcopy
-
-
-def sanitize(idp):
- idpcopy = deepcopy(idp)
- if 'config' in idpcopy:
- if 'clientSecret' in idpcopy['config']:
- # the client secret lives inside the config dict; mask it there
- idpcopy['config']['clientSecret'] = '**********'
- return idpcopy
-
-
-def get_identity_provider_with_mappers(kc, alias, realm):
- idp = kc.get_identity_provider(alias, realm)
- if idp is not None:
- idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name'))
- if idp is None:
- idp = {}
- return idp
-
-
-def main():
- """
- Module execution
-
- :return:
- """
- argument_spec = keycloak_argument_spec()
-
- mapper_spec = dict(
- id=dict(type='str'),
- name=dict(type='str'),
- identityProviderAlias=dict(type='str'),
- identityProviderMapper=dict(type='str'),
- config=dict(type='dict'),
- )
-
- meta_args = dict(
- state=dict(type='str', default='present', choices=['present', 'absent']),
- realm=dict(type='str', default='master'),
- alias=dict(type='str', required=True),
- add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']),
- authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']),
- config=dict(type='dict'),
- display_name=dict(type='str', aliases=['displayName']),
- enabled=dict(type='bool'),
- first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']),
- link_only=dict(type='bool', aliases=['linkOnly']),
- post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']),
- provider_id=dict(type='str', aliases=['providerId']),
- store_token=dict(type='bool', aliases=['storeToken']),
- trust_email=dict(type='bool', aliases=['trustEmail']),
- mappers=dict(type='list', elements='dict', options=mapper_spec),
- )
-
- argument_spec.update(meta_args)
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
-
- result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
-
- # Obtain access token, initialize API
- try:
- connection_header = get_token(module.params)
- except KeycloakError as e:
- module.fail_json(msg=str(e))
-
- kc = KeycloakAPI(module, connection_header)
-
- realm = module.params.get('realm')
- alias = module.params.get('alias')
- state = module.params.get('state')
-
- # Filter and map the parameter names that apply to the identity provider.
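For orientation, the filter that follows keeps only the identity-provider fields the caller actually supplied. A hypothetical illustration (invented values) of what it yields:

# module.params = {'alias': 'oidc-idp', 'enabled': True, 'state': 'present',
#                  'realm': 'myrealm', 'auth_username': 'admin', 'mappers': None}
# idp_params   -> ['alias', 'enabled']
# (auth options, 'state', 'realm', 'mappers' and unset/None options are dropped;
#  'mappers' gets its own change-detection handling further down.)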
- idp_params = [x for x in module.params
- if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
- module.params.get(x) is not None]
-
- # See if it already exists in Keycloak
- before_idp = get_identity_provider_with_mappers(kc, alias, realm)
-
- # Build a proposed changeset from parameters given to this module
- changeset = {}
-
- for param in idp_params:
- new_param_value = module.params.get(param)
- old_value = before_idp[camel(param)] if camel(param) in before_idp else None
- if new_param_value != old_value:
- changeset[camel(param)] = new_param_value
-
- # special handling of mappers list to allow change detection
- if module.params.get('mappers') is not None:
- for change in module.params['mappers']:
- change = dict((k, v) for k, v in change.items() if change[k] is not None)
- if change.get('id') is None and change.get('name') is None:
- module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
- if before_idp == dict():
- old_mapper = dict()
- elif change.get('id') is not None:
- old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm)
- if old_mapper is None:
- old_mapper = dict()
- else:
- found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']]
- if len(found) == 1:
- old_mapper = found[0]
- else:
- old_mapper = dict()
- new_mapper = old_mapper.copy()
- new_mapper.update(change)
- if new_mapper != old_mapper:
- if changeset.get('mappers') is None:
- changeset['mappers'] = list()
- changeset['mappers'].append(new_mapper)
-
- # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
- desired_idp = before_idp.copy()
- desired_idp.update(changeset)
-
- result['proposed'] = sanitize(changeset)
- result['existing'] = sanitize(before_idp)
-
- # Cater for when it doesn't exist (an empty dict)
- if not before_idp:
- if state == 'absent':
- # Do nothing and exit
- if module._diff:
- result['diff'] = dict(before='', after='')
- result['changed'] = False
- result['end_state'] = {}
- result['msg'] = 'Identity provider does not exist; doing nothing.'
- module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_idp)) - - if module.check_mode: - module.exit_json(**result) - - # create it - desired_idp = desired_idp.copy() - mappers = desired_idp.pop('mappers', []) - kc.create_identity_provider(desired_idp, realm) - for mapper in mappers: - if mapper.get('identityProviderAlias') is None: - mapper['identityProviderAlias'] = alias - kc.create_identity_provider_mapper(mapper, alias, realm) - after_idp = get_identity_provider_with_mappers(kc, alias, realm) - - result['end_state'] = sanitize(after_idp) - - result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_idp == before_idp: - result['changed'] = False - result['end_state'] = sanitize(desired_idp) - result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_idp), after=sanitize(desired_idp)) - - if module.check_mode: - module.exit_json(**result) - - # do the update - desired_idp = desired_idp.copy() - updated_mappers = desired_idp.pop('mappers', []) - kc.update_identity_provider(desired_idp, realm) - for mapper in updated_mappers: - if mapper.get('id') is not None: - kc.update_identity_provider_mapper(mapper, alias, realm) - else: - if mapper.get('identityProviderAlias') is None: - mapper['identityProviderAlias'] = alias - kc.create_identity_provider_mapper(mapper, alias, realm) - for mapper in [x for x in before_idp['mappers'] - if [y for y in updated_mappers if y["name"] == x['name']] == []]: - kc.delete_identity_provider_mapper(mapper['id'], alias, realm) - - after_idp = get_identity_provider_with_mappers(kc, alias, realm) - - result['end_state'] = sanitize(after_idp) - - result['msg'] = "Identity provider {alias} has been updated".format(alias=alias) - module.exit_json(**result) - - elif state == 'absent': - # Process a deletion - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_idp), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_identity_provider(alias, realm) - - result['end_state'] = {} - - result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_realm.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_realm.py deleted file mode 100644 index 289c1350..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_realm.py +++ /dev/null @@ -1,819 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Eike Frost -# Copyright (c) 2021, Christophe Gilles -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_realm - -short_description: Allows administration of Keycloak realm via Keycloak API - -version_added: 3.0.0 - - -description: - - This module allows the administration of Keycloak realm via the Keycloak REST API. 
It - requires access to the REST API via OpenID Connect; the user connecting and the realm being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate realm definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. - - - The Keycloak API does not always sanity check inputs e.g. you can set - SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. - If you do not specify a setting, usually a sensible default is chosen. - -options: - state: - description: - - State of the realm. - - On C(present), the realm will be created (or updated if it exists already). - - On C(absent), the realm will be removed if it exists. - choices: ['present', 'absent'] - default: 'present' - type: str - - id: - description: - - The realm to create. - type: str - realm: - description: - - The realm name. - type: str - access_code_lifespan: - description: - - The realm access code lifespan. - aliases: - - accessCodeLifespan - type: int - access_code_lifespan_login: - description: - - The realm access code lifespan login. - aliases: - - accessCodeLifespanLogin - type: int - access_code_lifespan_user_action: - description: - - The realm access code lifespan user action. - aliases: - - accessCodeLifespanUserAction - type: int - access_token_lifespan: - description: - - The realm access token lifespan. - aliases: - - accessTokenLifespan - type: int - access_token_lifespan_for_implicit_flow: - description: - - The realm access token lifespan for implicit flow. - aliases: - - accessTokenLifespanForImplicitFlow - type: int - account_theme: - description: - - The realm account theme. - aliases: - - accountTheme - type: str - action_token_generated_by_admin_lifespan: - description: - - The realm action token generated by admin lifespan. - aliases: - - actionTokenGeneratedByAdminLifespan - type: int - action_token_generated_by_user_lifespan: - description: - - The realm action token generated by user lifespan. - aliases: - - actionTokenGeneratedByUserLifespan - type: int - admin_events_details_enabled: - description: - - The realm admin events details enabled. - aliases: - - adminEventsDetailsEnabled - type: bool - admin_events_enabled: - description: - - The realm admin events enabled. - aliases: - - adminEventsEnabled - type: bool - admin_theme: - description: - - The realm admin theme. - aliases: - - adminTheme - type: str - attributes: - description: - - The realm attributes. - type: dict - browser_flow: - description: - - The realm browser flow. - aliases: - - browserFlow - type: str - browser_security_headers: - description: - - The realm browser security headers. - aliases: - - browserSecurityHeaders - type: dict - brute_force_protected: - description: - - The realm brute force protected. - aliases: - - bruteForceProtected - type: bool - client_authentication_flow: - description: - - The realm client authentication flow. - aliases: - - clientAuthenticationFlow - type: str - client_scope_mappings: - description: - - The realm client scope mappings. - aliases: - - clientScopeMappings - type: dict - default_default_client_scopes: - description: - - The realm default default client scopes. 
- aliases: - - defaultDefaultClientScopes - type: list - elements: dict - default_groups: - description: - - The realm default groups. - aliases: - - defaultGroups - type: list - elements: dict - default_locale: - description: - - The realm default locale. - aliases: - - defaultLocale - type: str - default_optional_client_scopes: - description: - - The realm default optional client scopes. - aliases: - - defaultOptionalClientScopes - type: list - elements: dict - default_roles: - description: - - The realm default roles. - aliases: - - defaultRoles - type: list - elements: dict - default_signature_algorithm: - description: - - The realm default signature algorithm. - aliases: - - defaultSignatureAlgorithm - type: str - direct_grant_flow: - description: - - The realm direct grant flow. - aliases: - - directGrantFlow - type: str - display_name: - description: - - The realm display name. - aliases: - - displayName - type: str - display_name_html: - description: - - The realm display name HTML. - aliases: - - displayNameHtml - type: str - docker_authentication_flow: - description: - - The realm docker authentication flow. - aliases: - - dockerAuthenticationFlow - type: str - duplicate_emails_allowed: - description: - - The realm duplicate emails allowed option. - aliases: - - duplicateEmailsAllowed - type: bool - edit_username_allowed: - description: - - The realm edit username allowed option. - aliases: - - editUsernameAllowed - type: bool - email_theme: - description: - - The realm email theme. - aliases: - - emailTheme - type: str - enabled: - description: - - The realm enabled option. - type: bool - enabled_event_types: - description: - - The realm enabled event types. - aliases: - - enabledEventTypes - type: list - elements: str - events_enabled: - description: - - Enables or disables login events for this realm. - aliases: - - eventsEnabled - type: bool - version_added: 3.6.0 - events_expiration: - description: - - The realm events expiration. - aliases: - - eventsExpiration - type: int - events_listeners: - description: - - The realm events listeners. - aliases: - - eventsListeners - type: list - elements: str - failure_factor: - description: - - The realm failure factor. - aliases: - - failureFactor - type: int - internationalization_enabled: - description: - - The realm internationalization enabled option. - aliases: - - internationalizationEnabled - type: bool - login_theme: - description: - - The realm login theme. - aliases: - - loginTheme - type: str - login_with_email_allowed: - description: - - The realm login with email allowed option. - aliases: - - loginWithEmailAllowed - type: bool - max_delta_time_seconds: - description: - - The realm max delta time in seconds. - aliases: - - maxDeltaTimeSeconds - type: int - max_failure_wait_seconds: - description: - - The realm max failure wait in seconds. - aliases: - - maxFailureWaitSeconds - type: int - minimum_quick_login_wait_seconds: - description: - - The realm minimum quick login wait in seconds. - aliases: - - minimumQuickLoginWaitSeconds - type: int - not_before: - description: - - The realm not before. - aliases: - - notBefore - type: int - offline_session_idle_timeout: - description: - - The realm offline session idle timeout. - aliases: - - offlineSessionIdleTimeout - type: int - offline_session_max_lifespan: - description: - - The realm offline session max lifespan. 
- aliases: - - offlineSessionMaxLifespan - type: int - offline_session_max_lifespan_enabled: - description: - - The realm offline session max lifespan enabled option. - aliases: - - offlineSessionMaxLifespanEnabled - type: bool - otp_policy_algorithm: - description: - - The realm otp policy algorithm. - aliases: - - otpPolicyAlgorithm - type: str - otp_policy_digits: - description: - - The realm otp policy digits. - aliases: - - otpPolicyDigits - type: int - otp_policy_initial_counter: - description: - - The realm otp policy initial counter. - aliases: - - otpPolicyInitialCounter - type: int - otp_policy_look_ahead_window: - description: - - The realm otp policy look ahead window. - aliases: - - otpPolicyLookAheadWindow - type: int - otp_policy_period: - description: - - The realm otp policy period. - aliases: - - otpPolicyPeriod - type: int - otp_policy_type: - description: - - The realm otp policy type. - aliases: - - otpPolicyType - type: str - otp_supported_applications: - description: - - The realm otp supported applications. - aliases: - - otpSupportedApplications - type: list - elements: str - password_policy: - description: - - The realm password policy. - aliases: - - passwordPolicy - type: str - permanent_lockout: - description: - - The realm permanent lockout. - aliases: - - permanentLockout - type: bool - quick_login_check_milli_seconds: - description: - - The realm quick login check in milliseconds. - aliases: - - quickLoginCheckMilliSeconds - type: int - refresh_token_max_reuse: - description: - - The realm refresh token max reuse. - aliases: - - refreshTokenMaxReuse - type: int - registration_allowed: - description: - - The realm registration allowed option. - aliases: - - registrationAllowed - type: bool - registration_email_as_username: - description: - - The realm registration email as username option. - aliases: - - registrationEmailAsUsername - type: bool - registration_flow: - description: - - The realm registration flow. - aliases: - - registrationFlow - type: str - remember_me: - description: - - The realm remember me option. - aliases: - - rememberMe - type: bool - reset_credentials_flow: - description: - - The realm reset credentials flow. - aliases: - - resetCredentialsFlow - type: str - reset_password_allowed: - description: - - The realm reset password allowed option. - aliases: - - resetPasswordAllowed - type: bool - revoke_refresh_token: - description: - - The realm revoke refresh token option. - aliases: - - revokeRefreshToken - type: bool - smtp_server: - description: - - The realm smtp server. - aliases: - - smtpServer - type: dict - ssl_required: - description: - - The realm ssl required option. - choices: ['all', 'external', 'none'] - aliases: - - sslRequired - type: str - sso_session_idle_timeout: - description: - - The realm sso session idle timeout. - aliases: - - ssoSessionIdleTimeout - type: int - sso_session_idle_timeout_remember_me: - description: - - The realm sso session idle timeout remember me. - aliases: - - ssoSessionIdleTimeoutRememberMe - type: int - sso_session_max_lifespan: - description: - - The realm sso session max lifespan. - aliases: - - ssoSessionMaxLifespan - type: int - sso_session_max_lifespan_remember_me: - description: - - The realm sso session max lifespan remember me. - aliases: - - ssoSessionMaxLifespanRememberMe - type: int - supported_locales: - description: - - The realm supported locales. 
- aliases: - - supportedLocales - type: list - elements: str - user_managed_access_allowed: - description: - - The realm user managed access allowed option. - aliases: - - userManagedAccessAllowed - type: bool - verify_email: - description: - - The realm verify email option. - aliases: - - verifyEmail - type: bool - wait_increment_seconds: - description: - - The realm wait increment in seconds. - aliases: - - waitIncrementSeconds - type: int - -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Christophe Gilles (@kris2kris) -''' - -EXAMPLES = ''' -- name: Create or update Keycloak realm (minimal example) - community.general.keycloak_realm: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - id: realm - state: present - -- name: Delete a Keycloak realm - community.general.keycloak_realm: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - id: test - state: absent - -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Realm testrealm has been updated" - -proposed: - description: Representation of proposed realm. - returned: always - type: dict - sample: { - id: "test" - } - -existing: - description: Representation of existing realm (sample is truncated). - returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: Representation of realm after module execution (sample is truncated). - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule - - -def sanitize_cr(realmrep): - """ Removes probably sensitive details from a realm representation. 
- - :param realmrep: the realmrep dict to be sanitized - :return: sanitized realmrep dict - """ - result = realmrep.copy() - if 'secret' in result: - result['secret'] = '********' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes'] = result['attributes'].copy() - result['attributes']['saml.signing.private.key'] = '********' - return result - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - - id=dict(type='str'), - realm=dict(type='str'), - access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']), - access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']), - access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']), - access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False), - access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False), - account_theme=dict(type='str', aliases=['accountTheme']), - action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False), - action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False), - admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']), - admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']), - admin_theme=dict(type='str', aliases=['adminTheme']), - attributes=dict(type='dict'), - browser_flow=dict(type='str', aliases=['browserFlow']), - browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']), - brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']), - client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']), - client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']), - default_default_client_scopes=dict(type='list', elements='dict', aliases=['defaultDefaultClientScopes']), - default_groups=dict(type='list', elements='dict', aliases=['defaultGroups']), - default_locale=dict(type='str', aliases=['defaultLocale']), - default_optional_client_scopes=dict(type='list', elements='dict', aliases=['defaultOptionalClientScopes']), - default_roles=dict(type='list', elements='dict', aliases=['defaultRoles']), - default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']), - direct_grant_flow=dict(type='str', aliases=['directGrantFlow']), - display_name=dict(type='str', aliases=['displayName']), - display_name_html=dict(type='str', aliases=['displayNameHtml']), - docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']), - duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']), - edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']), - email_theme=dict(type='str', aliases=['emailTheme']), - enabled=dict(type='bool'), - enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), - events_enabled=dict(type='bool', aliases=['eventsEnabled']), - events_expiration=dict(type='int', aliases=['eventsExpiration']), - events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), - failure_factor=dict(type='int', aliases=['failureFactor']), - internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), - 
login_theme=dict(type='str', aliases=['loginTheme']), - login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']), - max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']), - max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']), - minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']), - not_before=dict(type='int', aliases=['notBefore']), - offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']), - offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']), - offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']), - otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']), - otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']), - otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']), - otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']), - otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']), - otp_policy_type=dict(type='str', aliases=['otpPolicyType']), - otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), - password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), - permanent_lockout=dict(type='bool', aliases=['permanentLockout']), - quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), - refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), - registration_allowed=dict(type='bool', aliases=['registrationAllowed']), - registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']), - registration_flow=dict(type='str', aliases=['registrationFlow']), - remember_me=dict(type='bool', aliases=['rememberMe']), - reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), - reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), - revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), - smtp_server=dict(type='dict', aliases=['smtpServer']), - ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), - sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), - sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), - sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), - sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']), - supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']), - user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']), - verify_email=dict(type='bool', aliases=['verifyEmail']), - wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'realm', 'enabled'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - 
realm = module.params.get('realm')
-    state = module.params.get('state')
-
-    # convert module parameters to realm representation parameters (if they belong in there)
-    params_to_ignore = list(keycloak_argument_spec().keys()) + ['state']
-
-    # Filter and map the parameters names that apply to the role
-    realm_params = [x for x in module.params
-                    if x not in params_to_ignore and
-                    module.params.get(x) is not None]
-
-    # See whether the realm already exists in Keycloak
-    before_realm = kc.get_realm_by_id(realm=realm)
-
-    if before_realm is None:
-        before_realm = {}
-
-    # Build a proposed changeset from parameters given to this module
-    changeset = {}
-
-    for realm_param in realm_params:
-        new_param_value = module.params.get(realm_param)
-        changeset[camel(realm_param)] = new_param_value
-
-    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
-    desired_realm = before_realm.copy()
-    desired_realm.update(changeset)
-
-    result['proposed'] = sanitize_cr(changeset)
-    before_realm_sanitized = sanitize_cr(before_realm)
-    result['existing'] = before_realm_sanitized
-
-    # Cater for when it doesn't exist (an empty dict)
-    if not before_realm:
-        if state == 'absent':
-            # Do nothing and exit
-            if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'Realm does not exist, doing nothing.'
-            module.exit_json(**result)
-
-        # Process a creation
-        result['changed'] = True
-
-        if 'id' not in desired_realm:
-            module.fail_json(msg='id needs to be specified when creating a new realm')
-
-        if module._diff:
-            result['diff'] = dict(before='', after=sanitize_cr(desired_realm))
-
-        if module.check_mode:
-            module.exit_json(**result)
-
-        # create it
-        kc.create_realm(desired_realm)
-        after_realm = kc.get_realm_by_id(desired_realm['id'])
-
-        result['end_state'] = sanitize_cr(after_realm)
-
-        result['msg'] = 'Realm %s has been created.' % desired_realm['id']
-        module.exit_json(**result)
-
-    else:
-        if state == 'present':
-            # Process an update
-
-            # doing an update
-            result['changed'] = True
-            if module.check_mode:
-                # We can only compare the current realm with the proposed updates we have
-                if module._diff:
-                    result['diff'] = dict(before=before_realm_sanitized,
-                                          after=sanitize_cr(desired_realm))
-                result['changed'] = (before_realm != desired_realm)
-
-                module.exit_json(**result)
-
-            # do the update
-            kc.update_realm(desired_realm, realm=realm)
-
-            after_realm = kc.get_realm_by_id(realm=realm)
-
-            if before_realm == after_realm:
-                result['changed'] = False
-
-            result['end_state'] = sanitize_cr(after_realm)
-
-            if module._diff:
-                result['diff'] = dict(before=before_realm_sanitized,
-                                      after=sanitize_cr(after_realm))
-
-            result['msg'] = 'Realm %s has been updated.' % desired_realm['id']
-            module.exit_json(**result)
-
-        else:
-            # Process a deletion (because state was not 'present')
-            result['changed'] = True
-
-            if module._diff:
-                result['diff'] = dict(before=before_realm_sanitized, after='')
-
-            if module.check_mode:
-                module.exit_json(**result)
-
-            # delete it
-            kc.delete_realm(realm=realm)
-
-            result['proposed'] = {}
-            result['end_state'] = {}
-
-            result['msg'] = 'Realm %s has been deleted.' % before_realm['id']
-
-            module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
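The realm module above builds its API payload the same way the other Keycloak modules in this diff do: unset options are dropped, the snake_cased option names are converted back to camelCase, and the result is overlaid on the existing representation before comparison. A minimal sketch of that pattern; camel() here is a simplified stand-in for the helper these modules import from the collection's keycloak module_utils, and build_changeset() is a hypothetical name for illustration:

    # Simplified illustration only; the real helper lives in
    # plugins/module_utils/identity/keycloak/keycloak.py.
    def camel(words):
        # snake_case -> camelCase, e.g. 'access_token_lifespan' -> 'accessTokenLifespan'
        head, *rest = words.split('_')
        return head + ''.join(part.capitalize() for part in rest)

    def build_changeset(params, ignore):
        # Keep only the options the caller actually set, keyed the way the API expects.
        return {camel(k): v for k, v in params.items()
                if k not in ignore and v is not None}

    # build_changeset({'display_name': 'My Realm', 'enabled': None}, ignore=set())
    # -> {'displayName': 'My Realm'}

Overlaying this changeset on the fetched representation is what makes the modules idempotent: if the merged dict equals the existing one, no update call is made.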
diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_realm_info.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_realm_info.py
deleted file mode 100644
index a84c9dc7..00000000
--- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_realm_info.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: keycloak_realm_info
-
-short_description: Allows obtaining Keycloak realm public information via Keycloak API
-
-version_added: 4.3.0
-
-description:
-    - This module allows you to get Keycloak realm public information via the Keycloak REST API.
-
-    - The names of module options are snake_cased versions of the camelCase ones found in the
-      Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
-
-    - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
-      be returned that way by this module. You may pass single values for attributes when calling the module,
-      and this will be translated into a list suitable for the API.
-
-options:
-    auth_keycloak_url:
-        description:
-            - URL to the Keycloak instance.
-        type: str
-        required: true
-        aliases:
-            - url
-    validate_certs:
-        description:
-            - Verify TLS certificates (do not disable this in production).
-        type: bool
-        default: yes
-
-    realm:
-        type: str
-        description:
-            - The Keycloak realm ID.
-        default: 'master'
-
-author:
-    - Fynn Chen (@fynncfchen)
-'''
-
-EXAMPLES = '''
-- name: Get a Keycloak public key
-  community.general.keycloak_realm_info:
-    realm: MyCustomRealm
-    auth_keycloak_url: https://auth.example.com/auth
-  delegate_to: localhost
-'''
-
-RETURN = '''
-msg:
    description: Message as to what action was taken.
-    returned: always
-    type: str
-
-realm_info:
-    description:
-        - Representation of the realm public information.
-    returned: always
-    type: dict
-    contains:
-        realm:
-            description: Realm ID.
-            type: str
-            returned: always
-            sample: MyRealm
-        public_key:
-            description: Public key of the realm.
-            type: str
-            returned: always
-            sample: MIIBIjANBgkqhkiG9w0BAQEFAAO...
-        token-service:
-            description: Token endpoint URL.
-            type: str
-            returned: always
-            sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect
-        account-service:
-            description: Account console URL.
-            type: str
-            returned: always
-            sample: https://auth.example.com/auth/realms/MyRealm/account
-        tokens-not-before:
-            description: The token not before.
- type: int - returned: always - sample: 0 -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI -from ansible.module_utils.basic import AnsibleModule - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = dict( - auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), - validate_certs=dict(type='bool', default=True), - - realm=dict(default='master'), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - result = dict(changed=False, msg='', realm_info='') - - kc = KeycloakAPI(module, {}) - - realm = module.params.get('realm') - - realm_info = kc.get_realm_info_by_id(realm=realm) - - result['realm_info'] = realm_info - result['msg'] = 'Get realm public info successful for ID {realm}'.format(realm=realm) - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_role.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_role.py deleted file mode 100644 index 2dd2438e..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_role.py +++ /dev/null @@ -1,368 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019, Adam Goossens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_role - -short_description: Allows administration of Keycloak roles via Keycloak API - -version_added: 3.4.0 - -description: - - This module allows you to add, remove or modify Keycloak roles via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - -options: - state: - description: - - State of the role. - - On C(present), the role will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the role will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - name: - type: str - required: true - description: - - Name of the role. - - This parameter is required. - - description: - type: str - description: - - The role description. - - realm: - type: str - description: - - The Keycloak realm under which this role resides. - default: 'master' - - client_id: - type: str - description: - - If the role is a client role, the client id under which it resides. - - If this parameter is absent, the role is considered a realm role. - - attributes: - type: dict - description: - - A dict of key/value pairs to set as custom attributes for the role. 
- - Values may be single values (e.g. a string) or a list of strings. - -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Laurent Paumier (@laurpaum) -''' - -EXAMPLES = ''' -- name: Create a Keycloak realm role, authentication with credentials - community.general.keycloak_role: - name: my-new-kc-role - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a Keycloak realm role, authentication with token - community.general.keycloak_role: - name: my-new-kc-role - realm: MyCustomRealm - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - token: TOKEN - delegate_to: localhost - -- name: Create a Keycloak client role - community.general.keycloak_role: - name: my-new-kc-role - realm: MyCustomRealm - client_id: MyClient - state: present - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Delete a Keycloak role - community.general.keycloak_role: - name: my-role-for-deletion - state: absent - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - delegate_to: localhost - -- name: Create a keycloak role with some custom attributes - community.general.keycloak_role: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - name: my-new-role - attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - with - - numerous - - individual - - list - - items - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Role myrole has been updated" - -proposed: - description: Representation of proposed role. - returned: always - type: dict - sample: { - "description": "My updated test description" - } - -existing: - description: Representation of existing role. - returned: always - type: dict - sample: { - "attributes": {}, - "clientRole": true, - "composite": false, - "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", - "description": "My client test role", - "id": "561703dd-0f38-45ff-9a5a-0c978f794547", - "name": "myrole" - } - -end_state: - description: Representation of role after module execution (sample is truncated). 
-    returned: on success
-    type: dict
-    sample: {
-        "attributes": {},
-        "clientRole": true,
-        "composite": false,
-        "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
-        "description": "My updated client test role",
-        "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
-        "name": "myrole"
-    }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
-    keycloak_argument_spec, get_token, KeycloakError
-from ansible.module_utils.basic import AnsibleModule
-
-
-def main():
-    """
-    Module execution
-
-    :return:
-    """
-    argument_spec = keycloak_argument_spec()
-
-    meta_args = dict(
-        state=dict(type='str', default='present', choices=['present', 'absent']),
-        name=dict(type='str', required=True),
-        description=dict(type='str'),
-        realm=dict(type='str', default='master'),
-        client_id=dict(type='str'),
-        attributes=dict(type='dict'),
-    )
-
-    argument_spec.update(meta_args)
-
-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
-                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))
-
-    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
-
-    # Obtain access token, initialize API
-    try:
-        connection_header = get_token(module.params)
-    except KeycloakError as e:
-        module.fail_json(msg=str(e))
-
-    kc = KeycloakAPI(module, connection_header)
-
-    realm = module.params.get('realm')
-    clientid = module.params.get('client_id')
-    name = module.params.get('name')
-    state = module.params.get('state')
-
-    # attributes in Keycloak have their values returned as lists
-    # via the API. attributes is a dict, so we'll transparently convert
-    # the values to lists.
-    if module.params.get('attributes') is not None:
-        for key, val in module.params['attributes'].items():
-            module.params['attributes'][key] = [val] if not isinstance(val, list) else val
-
-    # Filter and map the parameters names that apply to the role
-    role_params = [x for x in module.params
-                   if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id', 'composites'] and
-                   module.params.get(x) is not None]
-
-    # See if it already exists in Keycloak
-    if clientid is None:
-        before_role = kc.get_realm_role(name, realm)
-    else:
-        before_role = kc.get_client_role(name, clientid, realm)
-
-    if before_role is None:
-        before_role = {}
-
-    # Build a proposed changeset from parameters given to this module
-    changeset = {}
-
-    for param in role_params:
-        new_param_value = module.params.get(param)
-        old_value = before_role[param] if param in before_role else None
-        if new_param_value != old_value:
-            changeset[camel(param)] = new_param_value
-
-    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
-    desired_role = before_role.copy()
-    desired_role.update(changeset)
-
-    result['proposed'] = changeset
-    result['existing'] = before_role
-
-    # Cater for when it doesn't exist (an empty dict)
-    if not before_role:
-        if state == 'absent':
-            # Do nothing and exit
-            if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'Role does not exist, doing nothing.'
-            module.exit_json(**result)
-
-        # Process a creation
-        result['changed'] = True
-
-        if name is None:
-            module.fail_json(msg='name must be specified when creating a new role')
-
-        if module._diff:
-            result['diff'] = dict(before='', after=desired_role)
-
-        if module.check_mode:
-            module.exit_json(**result)
-
-        # create it
-        if clientid is None:
-            kc.create_realm_role(desired_role, realm)
-            after_role = kc.get_realm_role(name, realm)
-        else:
-            kc.create_client_role(desired_role, clientid, realm)
-            after_role = kc.get_client_role(name, clientid, realm)
-
-        result['end_state'] = after_role
-
-        result['msg'] = 'Role {name} has been created'.format(name=name)
-        module.exit_json(**result)
-
-    else:
-        if state == 'present':
-            # Process an update
-
-            # no changes
-            if desired_role == before_role:
-                result['changed'] = False
-                result['end_state'] = desired_role
-                result['msg'] = "No changes required to role {name}.".format(name=name)
-                module.exit_json(**result)
-
-            # doing an update
-            result['changed'] = True
-
-            if module._diff:
-                result['diff'] = dict(before=before_role, after=desired_role)
-
-            if module.check_mode:
-                module.exit_json(**result)
-
-            # do the update
-            if clientid is None:
-                kc.update_realm_role(desired_role, realm)
-                after_role = kc.get_realm_role(name, realm)
-            else:
-                kc.update_client_role(desired_role, clientid, realm)
-                after_role = kc.get_client_role(name, clientid, realm)
-
-            result['end_state'] = after_role
-
-            result['msg'] = "Role {name} has been updated".format(name=name)
-            module.exit_json(**result)
-
-        else:
-            # Process a deletion (because state was not 'present')
-            result['changed'] = True
-
-            if module._diff:
-                result['diff'] = dict(before=before_role, after='')
-
-            if module.check_mode:
-                module.exit_json(**result)
-
-            # delete it
-            if clientid is None:
-                kc.delete_realm_role(name, realm)
-            else:
-                kc.delete_client_role(name, clientid, realm)
-
-            result['end_state'] = {}
-
-            result['msg'] = "Role {name} has been deleted".format(name=name)
-
-            module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
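As the keycloak_role DOCUMENTATION above notes, the Keycloak API returns every custom attribute as a list, so the module wraps scalar values before comparing; without that, the idempotence check would report a change on every run. A one-line sketch of the normalization done in main(), with normalize_attributes as a hypothetical name for illustration:

    # Mirrors the conversion in keycloak_role's main(): scalars become
    # single-element lists so proposed values compare equal to API output.
    def normalize_attributes(attributes):
        return {key: val if isinstance(val, list) else [val]
                for key, val in attributes.items()}

    assert normalize_attributes({'attrib1': 'value1'}) == {'attrib1': ['value1']}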
diff --git a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_user_federation.py b/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_user_federation.py
deleted file mode 100644
index 4d623a48..00000000
--- a/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_user_federation.py
+++ /dev/null
@@ -1,1009 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: keycloak_user_federation
-
-short_description: Allows administration of Keycloak user federations via Keycloak API
-
-version_added: 3.7.0
-
-description:
-    - This module allows you to add, remove or modify Keycloak user federations via the Keycloak REST API.
-      It requires access to the REST API via OpenID Connect; the user connecting and the client being
-      used must have the requisite access rights. In a default Keycloak installation, admin-cli
-      and an admin user would work, as would a separate client definition with the scope tailored
-      to your needs and a user having the expected roles.
-
-    - The names of module options are snake_cased versions of the camelCase ones found in the
-      Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).
-
-
-options:
-    state:
-        description:
-            - State of the user federation.
-            - On C(present), the user federation will be created if it does not yet exist, or updated with
              the parameters you provide.
-            - On C(absent), the user federation will be removed if it exists.
-        default: 'present'
-        type: str
-        choices:
-            - present
-            - absent
-
-    realm:
-        description:
-            - The Keycloak realm under which this user federation resides.
-        default: 'master'
-        type: str
-
-    id:
-        description:
-            - The unique ID for this user federation. If left empty, the user federation will be searched
-              by its I(name).
-        type: str
-
-    name:
-        description:
-            - Display name of provider when linked in admin console.
-        type: str
-
-    provider_id:
-        description:
-            - Provider for this user federation.
-        aliases:
-            - providerId
-        type: str
-        choices:
-            - ldap
-            - kerberos
-            - sssd
-
-    provider_type:
-        description:
-            - Component type for user federation (only supported value is C(org.keycloak.storage.UserStorageProvider)).
-        aliases:
-            - providerType
-        default: org.keycloak.storage.UserStorageProvider
-        type: str
-
-    parent_id:
-        description:
-            - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank.
-        aliases:
-            - parentId
-        type: str
-
-    config:
-        description:
-            - Dict specifying the configuration options for the provider; the contents differ depending on
-              the value of I(provider_id). Examples are given below for C(ldap), C(kerberos) and C(sssd).
-              It is easiest to obtain valid config values by dumping an already-existing user federation
-              configuration through check-mode in the I(existing) field.
-            - The value C(sssd) has been supported since community.general 4.2.0.
-        type: dict
-        suboptions:
-            enabled:
-                description:
-                    - Enable/disable this user federation.
-                default: true
-                type: bool
-
-            priority:
-                description:
-                    - Priority of provider when doing a user lookup. Lowest first.
-                default: 0
-                type: int
-
-            importEnabled:
-                description:
-                    - If C(true), LDAP users will be imported into Keycloak DB and synced by the configured
-                      sync policies.
-                default: true
-                type: bool
-
-            editMode:
-                description:
-                    - C(READ_ONLY) is a read-only LDAP store. C(WRITABLE) means data will be synced back to LDAP
-                      on demand. C(UNSYNCED) means user data will be imported, but not synced back to LDAP.
-                type: str
-                choices:
-                    - READ_ONLY
-                    - WRITABLE
-                    - UNSYNCED
-
-            syncRegistrations:
-                description:
-                    - Should newly created users be created within LDAP store? Priority affects which
-                      provider is chosen to sync the new user.
-                default: false
-                type: bool
-
-            vendor:
-                description:
-                    - LDAP vendor (provider).
-                type: str
-
-            usernameLDAPAttribute:
-                description:
-                    - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server
-                      vendors it can be C(uid). For Active directory it can be C(sAMAccountName) or C(cn).
-                      The attribute should be filled for all LDAP user records you want to import from
-                      LDAP to Keycloak.
-                type: str
-
-            rdnLDAPAttribute:
-                description:
-                    - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN.
-                      Usually it's the same as Username LDAP attribute, however it is not required. For
-                      example for Active directory, it is common to use C(cn) as RDN attribute when
-                      username attribute might be C(sAMAccountName).
-                type: str
-
-            uuidLDAPAttribute:
-                description:
-                    - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects
-                      in LDAP. For many LDAP server vendors, it is C(entryUUID); however some are different.
-                      For example for Active directory it should be C(objectGUID). If your LDAP server does
-                      not support the notion of UUID, you can use any other attribute that is supposed to
-                      be unique among LDAP users in tree.
-                type: str
-
-            userObjectClasses:
-                description:
-                    - All values of LDAP objectClass attribute for users in LDAP divided by comma.
-                      For example C(inetOrgPerson, organizationalPerson). Newly created Keycloak users
-                      will be written to LDAP with all those object classes and existing LDAP user records
-                      are found only if they contain all those object classes.
-                type: str
-
-            connectionUrl:
-                description:
-                    - Connection URL to your LDAP server.
-                type: str
-
-            usersDn:
-                description:
-                    - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users.
-                type: str
-
-            customUserSearchFilter:
-                description:
-                    - Additional LDAP Filter for filtering searched users. Leave this empty if you don't
-                      need an additional filter.
-                type: str
-
-            searchScope:
-                description:
-                    - For one level, the search applies only for users in the DNs specified by User DNs.
-                      For subtree, the search applies to the whole subtree. See LDAP documentation for
-                      more details.
-                default: '1'
-                type: str
-                choices:
-                    - '1'
-                    - '2'
-
-            authType:
-                description:
-                    - Type of the Authentication method used during LDAP Bind operation. It is used in
-                      most of the requests sent to the LDAP server.
-                default: 'none'
-                type: str
-                choices:
-                    - none
-                    - simple
-
-            bindDn:
-                description:
-                    - DN of LDAP user which will be used by Keycloak to access LDAP server.
-                type: str
-
-            bindCredential:
-                description:
-                    - Password of LDAP admin.
-                type: str
-
-            startTls:
-                description:
-                    - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling.
-                default: false
-                type: bool
-
-            usePasswordModifyExtendedOp:
-                description:
-                    - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify
-                      extended operation usually requires that the LDAP user already has a password in the LDAP
-                      server. So when this is used with 'Sync Registrations', it can be good to also add a
-                      'Hardcoded LDAP attribute mapper' with a randomly generated initial password.
-                default: false
-                type: bool
-
-            validatePasswordPolicy:
-                description:
-                    - Determines if Keycloak should validate the password with the realm password policy
-                      before updating it.
-                default: false
-                type: bool
-
-            trustEmail:
-                description:
-                    - If enabled, email provided by this provider is not verified even if verification is
-                      enabled for the realm.
-                default: false
-                type: bool
-
-            useTruststoreSpi:
-                description:
-                    - Specifies whether LDAP connection will use the truststore SPI with the truststore
-                      configured in standalone.xml/domain.xml. C(Always) means that it will always use it.
-                      C(Never) means that it will not use it. C(Only for ldaps) means that it will use it if
-                      your connection URL uses ldaps. Note that even if standalone.xml/domain.xml is not
-                      configured, the default Java cacerts or certificate specified by
-                      C(javax.net.ssl.trustStore) property will be used.
-                default: ldapsOnly
-                type: str
-                choices:
-                    - always
-                    - ldapsOnly
-                    - never
-
-            connectionTimeout:
-                description:
-                    - LDAP Connection Timeout in milliseconds.
-                type: int
-
-            readTimeout:
-                description:
-                    - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations.
-                type: int
-
-            pagination:
-                description:
-                    - Whether the LDAP server supports pagination.
-                default: true
-                type: bool
-
-            connectionPooling:
-                description:
-                    - Determines if Keycloak should use connection pooling for accessing LDAP server.
-                default: true
-                type: bool
-
-            connectionPoolingAuthentication:
-                description:
-                    - A list of space-separated authentication types of connections that may be pooled.
-                type: str
-                choices:
-                    - none
-                    - simple
-                    - DIGEST-MD5
-
-            connectionPoolingDebug:
-                description:
-                    - A string that indicates the level of debug output to produce. Example valid values are
-                      C(fine) (trace connection creation and removal) and C(all) (all debugging information).
-                type: str
-
-            connectionPoolingInitSize:
-                description:
-                    - The number of connections per connection identity to create when initially creating a
-                      connection for the identity.
-                type: int
-
-            connectionPoolingMaxSize:
-                description:
-                    - The maximum number of connections per connection identity that can be maintained
-                      concurrently.
-                type: int
-
-            connectionPoolingPrefSize:
-                description:
-                    - The preferred number of connections per connection identity that should be maintained
-                      concurrently.
-                type: int
-
-            connectionPoolingProtocol:
-                description:
-                    - A list of space-separated protocol types of connections that may be pooled.
-                      Valid types are C(plain) and C(ssl).
-                type: str
-
-            connectionPoolingTimeout:
-                description:
-                    - The number of milliseconds that an idle connection may remain in the pool without
-                      being closed and removed from the pool.
-                type: int
-
-            allowKerberosAuthentication:
-                description:
-                    - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data
-                      about authenticated users will be provisioned from this LDAP server.
-                default: false
-                type: bool
-
-            kerberosRealm:
-                description:
-                    - Name of the Kerberos realm.
-                type: str
-
-            serverPrincipal:
-                description:
-                    - Full name of server principal for HTTP service including server and domain name. For
-                      example C(HTTP/host.foo.org@FOO.ORG). Use C(*) to accept any service principal in the
-                      KeyTab file.
-                type: str
-
-            keyTab:
-                description:
-                    - Location of Kerberos KeyTab file containing the credentials of server principal. For
-                      example C(/etc/krb5.keytab).
-                type: str
-
-            debug:
-                description:
-                    - Enable/disable debug logging to standard output for Krb5LoginModule.
-                type: bool
-
-            useKerberosForPasswordAuthentication:
-                description:
-                    - Use the Kerberos login module for authenticating username/password against the Kerberos
-                      server instead of authenticating against the LDAP server with the Directory Service API.
-                default: false
-                type: bool
-
-            allowPasswordAuthentication:
-                description:
-                    - Enable/disable possibility of username/password authentication against Kerberos database.
-                type: bool
-
-            batchSizeForSync:
-                description:
-                    - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction.
-                default: 1000
-                type: int
-
-            fullSyncPeriod:
-                description:
-                    - Period for full synchronization in seconds.
-                default: -1
-                type: int
-
-            changedSyncPeriod:
-                description:
-                    - Period for synchronization of changed or newly created LDAP users in seconds.
-                default: -1
-                type: int
-
-            updateProfileFirstLogin:
-                description:
-                    - Update profile on first login.
-                type: bool
-
-            cachePolicy:
-                description:
-                    - Cache Policy for this storage provider.
-                type: str
-                default: 'DEFAULT'
-                choices:
-                    - DEFAULT
-                    - EVICT_DAILY
-                    - EVICT_WEEKLY
-                    - MAX_LIFESPAN
-                    - NO_CACHE
-
-            evictionDay:
-                description:
-                    - Day of the week the entry will become invalid on.
- type: str - - evictionHour: - description: - - Hour of day the entry will become invalid on. - type: str - - evictionMinute: - description: - - Minute of day the entry will become invalid on. - type: str - - maxLifespan: - description: - - Max lifespan of cache entry in milliseconds. - type: int - - mappers: - description: - - A list of dicts defining mappers associated with this Identity Provider. - type: list - elements: dict - suboptions: - id: - description: - - Unique ID of this mapper. - type: str - - name: - description: - - Name of the mapper. If no ID is given, the mapper will be searched by name. - type: str - - parentId: - description: - - Unique ID for the parent of this mapper. ID of the user federation will automatically - be used if left blank. - type: str - - providerId: - description: - - The mapper type for this mapper (for instance C(user-attribute-ldap-mapper)). - type: str - - providerType: - description: - - Component type for this mapper (only supported value is C(org.keycloak.storage.ldap.mappers.LDAPStorageMapper)). - type: str - - config: - description: - - Dict specifying the configuration options for the mapper; the contents differ - depending on the value of I(identityProviderMapper). - type: dict - -extends_documentation_fragment: -- community.general.keycloak - -author: - - Laurent Paumier (@laurpaum) -''' - -EXAMPLES = ''' - - name: Create LDAP user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-ldap - state: present - provider_id: ldap - provider_type: org.keycloak.storage.UserStorageProvider - config: - priority: 0 - enabled: true - cachePolicy: DEFAULT - batchSizeForSync: 1000 - editMode: READ_ONLY - importEnabled: true - syncRegistrations: false - vendor: other - usernameLDAPAttribute: uid - rdnLDAPAttribute: uid - uuidLDAPAttribute: entryUUID - userObjectClasses: inetOrgPerson, organizationalPerson - connectionUrl: ldaps://ldap.example.com:636 - usersDn: ou=Users,dc=example,dc=com - authType: simple - bindDn: cn=directory reader - bindCredential: password - searchScope: 1 - validatePasswordPolicy: false - trustEmail: false - useTruststoreSpi: ldapsOnly - connectionPooling: true - pagination: true - allowKerberosAuthentication: false - debug: false - useKerberosForPasswordAuthentication: false - mappers: - - name: "full name" - providerId: "full-name-ldap-mapper" - providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - config: - ldap.full.name.attribute: cn - read.only: true - write.only: false - - - name: Create Kerberos user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-kerberos - state: present - provider_id: kerberos - provider_type: org.keycloak.storage.UserStorageProvider - config: - priority: 0 - enabled: true - cachePolicy: DEFAULT - kerberosRealm: EXAMPLE.COM - serverPrincipal: HTTP/host.example.com@EXAMPLE.COM - keyTab: keytab - allowPasswordAuthentication: false - updateProfileFirstLogin: false - - - name: Create sssd user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-sssd - state: present - provider_id: sssd - provider_type: 
org.keycloak.storage.UserStorageProvider - config: - priority: 0 - enabled: true - cachePolicy: DEFAULT - - - name: Delete user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-federation - state: absent - -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799." - -proposed: - description: Representation of proposed user federation. - returned: always - type: dict - sample: { - "config": { - "allowKerberosAuthentication": "false", - "authType": "simple", - "batchSizeForSync": "1000", - "bindCredential": "**********", - "bindDn": "cn=directory reader", - "cachePolicy": "DEFAULT", - "connectionPooling": "true", - "connectionUrl": "ldaps://ldap.example.com:636", - "debug": "false", - "editMode": "READ_ONLY", - "enabled": "true", - "importEnabled": "true", - "pagination": "true", - "priority": "0", - "rdnLDAPAttribute": "uid", - "searchScope": "1", - "syncRegistrations": "false", - "trustEmail": "false", - "useKerberosForPasswordAuthentication": "false", - "useTruststoreSpi": "ldapsOnly", - "userObjectClasses": "inetOrgPerson, organizationalPerson", - "usernameLDAPAttribute": "uid", - "usersDn": "ou=Users,dc=example,dc=com", - "uuidLDAPAttribute": "entryUUID", - "validatePasswordPolicy": "false", - "vendor": "other" - }, - "name": "ldap", - "providerId": "ldap", - "providerType": "org.keycloak.storage.UserStorageProvider" - } - -existing: - description: Representation of existing user federation. - returned: always - type: dict - sample: { - "config": { - "allowKerberosAuthentication": "false", - "authType": "simple", - "batchSizeForSync": "1000", - "bindCredential": "**********", - "bindDn": "cn=directory reader", - "cachePolicy": "DEFAULT", - "changedSyncPeriod": "-1", - "connectionPooling": "true", - "connectionUrl": "ldaps://ldap.example.com:636", - "debug": "false", - "editMode": "READ_ONLY", - "enabled": "true", - "fullSyncPeriod": "-1", - "importEnabled": "true", - "pagination": "true", - "priority": "0", - "rdnLDAPAttribute": "uid", - "searchScope": "1", - "syncRegistrations": "false", - "trustEmail": "false", - "useKerberosForPasswordAuthentication": "false", - "useTruststoreSpi": "ldapsOnly", - "userObjectClasses": "inetOrgPerson, organizationalPerson", - "usernameLDAPAttribute": "uid", - "usersDn": "ou=Users,dc=example,dc=com", - "uuidLDAPAttribute": "entryUUID", - "validatePasswordPolicy": "false", - "vendor": "other" - }, - "id": "01122837-9047-4ae4-8ca0-6e2e891a765f", - "mappers": [ - { - "config": { - "always.read.value.from.ldap": "false", - "is.mandatory.in.ldap": "false", - "ldap.attribute": "mail", - "read.only": "true", - "user.model.attribute": "email" - }, - "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f", - "name": "email", - "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f", - "providerId": "user-attribute-ldap-mapper", - "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - } - ], - "name": "myfed", - "parentId": "myrealm", - "providerId": "ldap", - "providerType": "org.keycloak.storage.UserStorageProvider" - } - -end_state: - description: Representation of user federation after module execution. 
- returned: on success - type: dict - sample: { - "config": { - "allowPasswordAuthentication": "false", - "cachePolicy": "DEFAULT", - "enabled": "true", - "kerberosRealm": "EXAMPLE.COM", - "keyTab": "/etc/krb5.keytab", - "priority": "0", - "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM", - "updateProfileFirstLogin": "false" - }, - "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122", - "mappers": [], - "name": "kerberos", - "parentId": "myrealm", - "providerId": "kerberos", - "providerType": "org.keycloak.storage.UserStorageProvider" - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from copy import deepcopy - - -def sanitize(comp): - compcopy = deepcopy(comp) - if 'config' in compcopy: - compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items()) - if 'bindCredential' in compcopy['config']: - compcopy['config']['bindCredential'] = '**********' - if 'mappers' in compcopy: - for mapper in compcopy['mappers']: - if 'config' in mapper: - mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items()) - return compcopy - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - config_spec = dict( - allowKerberosAuthentication=dict(type='bool', default=False), - allowPasswordAuthentication=dict(type='bool'), - authType=dict(type='str', choices=['none', 'simple'], default='none'), - batchSizeForSync=dict(type='int', default=1000), - bindCredential=dict(type='str', no_log=True), - bindDn=dict(type='str'), - cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'), - changedSyncPeriod=dict(type='int', default=-1), - connectionPooling=dict(type='bool', default=True), - connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']), - connectionPoolingDebug=dict(type='str'), - connectionPoolingInitSize=dict(type='int'), - connectionPoolingMaxSize=dict(type='int'), - connectionPoolingPrefSize=dict(type='int'), - connectionPoolingProtocol=dict(type='str'), - connectionPoolingTimeout=dict(type='int'), - connectionTimeout=dict(type='int'), - connectionUrl=dict(type='str'), - customUserSearchFilter=dict(type='str'), - debug=dict(type='bool'), - editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']), - enabled=dict(type='bool', default=True), - evictionDay=dict(type='str'), - evictionHour=dict(type='str'), - evictionMinute=dict(type='str'), - fullSyncPeriod=dict(type='int', default=-1), - importEnabled=dict(type='bool', default=True), - kerberosRealm=dict(type='str'), - keyTab=dict(type='str', no_log=False), - maxLifespan=dict(type='int'), - pagination=dict(type='bool', default=True), - priority=dict(type='int', default=0), - rdnLDAPAttribute=dict(type='str'), - readTimeout=dict(type='int'), - searchScope=dict(type='str', choices=['1', '2'], default='1'), - serverPrincipal=dict(type='str'), - startTls=dict(type='bool', default=False), - syncRegistrations=dict(type='bool', default=False), - trustEmail=dict(type='bool', default=False), - updateProfileFirstLogin=dict(type='bool'), - useKerberosForPasswordAuthentication=dict(type='bool', default=False), - usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False), - useTruststoreSpi=dict(type='str', 
choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'), - userObjectClasses=dict(type='str'), - usernameLDAPAttribute=dict(type='str'), - usersDn=dict(type='str'), - uuidLDAPAttribute=dict(type='str'), - validatePasswordPolicy=dict(type='bool', default=False), - vendor=dict(type='str'), - ) - - mapper_spec = dict( - id=dict(type='str'), - name=dict(type='str'), - parentId=dict(type='str'), - providerId=dict(type='str'), - providerType=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - config=dict(type='dict', options=config_spec), - state=dict(type='str', default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - id=dict(type='str'), - name=dict(type='str'), - provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos', 'sssd']), - provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), - parent_id=dict(type='str', aliases=['parentId']), - mappers=dict(type='list', elements='dict', options=mapper_spec), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - config = module.params.get('config') - mappers = module.params.get('mappers') - cid = module.params.get('id') - name = module.params.get('name') - - # Keycloak API expects config parameters to be arrays containing a single string element - if config is not None: - module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in config.items() if config[k] is not None) - - if mappers is not None: - for mapper in mappers: - if mapper.get('config') is not None: - mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in mapper['config'].items() if mapper['config'][k] is not None) - - # Filter and map the parameters names that apply - comp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - if cid is None: - found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', parent=realm, name=name)), realm) - if len(found) > 1: - module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. 
Cannot continue.'.format(name=name)) - before_comp = next(iter(found), None) - if before_comp is not None: - cid = before_comp['id'] - else: - before_comp = kc.get_component(cid, realm) - - if before_comp is None: - before_comp = {} - - # if user federation exists, get associated mappers - if cid is not None and before_comp: - before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name')) - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for param in comp_params: - new_param_value = module.params.get(param) - old_value = before_comp[camel(param)] if camel(param) in before_comp else None - if param == 'mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] - if new_param_value != old_value: - changeset[camel(param)] = new_param_value - - # special handling of mappers list to allow change detection - if module.params.get('mappers') is not None: - if module.params['provider_id'] in ['kerberos', 'sssd']: - module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id'])) - for change in module.params['mappers']: - change = dict((k, v) for k, v in change.items() if change[k] is not None) - if change.get('id') is None and change.get('name') is None: - module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') - if cid is None: - old_mapper = {} - elif change.get('id') is not None: - old_mapper = kc.get_component(change['id'], realm) - if old_mapper is None: - old_mapper = {} - else: - found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm) - if len(found) > 1: - module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=change['name'])) - if len(found) == 1: - old_mapper = found[0] - else: - old_mapper = {} - new_mapper = old_mapper.copy() - new_mapper.update(change) - if new_mapper != old_mapper: - if changeset.get('mappers') is None: - changeset['mappers'] = list() - changeset['mappers'].append(new_mapper) - - # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) - desired_comp = before_comp.copy() - desired_comp.update(changeset) - - result['proposed'] = sanitize(changeset) - result['existing'] = sanitize(before_comp) - - # Cater for when it doesn't exist (an empty dict) - if not before_comp: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'User federation does not exist; doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_comp)) - - if module.check_mode: - module.exit_json(**result) - - # create it - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) - after_comp = kc.create_component(desired_comp, realm) - - for mapper in updated_mappers: - found = kc.get_components(urlencode(dict(parent=cid, name=mapper['name'])), realm) - if len(found) > 1: - module.fail_json(msg='Found multiple mappers with name `{name}`. 
Cannot continue.'.format(name=mapper['name'])) - if len(found) == 1: - old_mapper = found[0] - else: - old_mapper = {} - - new_mapper = old_mapper.copy() - new_mapper.update(mapper) - - if new_mapper.get('id') is not None: - kc.update_component(new_mapper, realm) - else: - if new_mapper.get('parentId') is None: - new_mapper['parentId'] = after_comp['id'] - mapper = kc.create_component(new_mapper, realm) - - after_comp['mappers'] = updated_mappers - result['end_state'] = sanitize(after_comp) - - result['msg'] = "User federation {id} has been created".format(id=after_comp['id']) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_comp == before_comp: - result['changed'] = False - result['end_state'] = sanitize(desired_comp) - result['msg'] = "No changes required to user federation {id}.".format(id=cid) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_comp), after=sanitize(desired_comp)) - - if module.check_mode: - module.exit_json(**result) - - # do the update - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) - kc.update_component(desired_comp, realm) - after_comp = kc.get_component(cid, realm) - - for mapper in updated_mappers: - if mapper.get('id') is not None: - kc.update_component(mapper, realm) - else: - if mapper.get('parentId') is None: - mapper['parentId'] = desired_comp['id'] - mapper = kc.create_component(mapper, realm) - - after_comp['mappers'] = updated_mappers - result['end_state'] = sanitize(after_comp) - - result['msg'] = "User federation {id} has been updated".format(id=cid) - module.exit_json(**result) - - elif state == 'absent': - # Process a deletion - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_comp), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_component(cid, realm) - - result['end_state'] = {} - - result['msg'] = "User federation {id} has been deleted".format(id=cid) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py b/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py deleted file mode 100644 index 95ef7c12..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py +++ /dev/null @@ -1,384 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Ryan Conway (@rylon) -# (c) 2018, Scott Buchanan (onepassword.py used as starting point) -# (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' -module: onepassword_info -author: - - Ryan Conway (@Rylon) -requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) -notes: - - Tested with C(op) version 0.5.5 - - "Based on the C(onepassword) lookup plugin by Scott Buchanan ." -short_description: Gather items from 1Password -description: - - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items. - - A fatal error occurs if any of the items being searched for can not be found. 
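(A minimal editorial sketch, not part of the removed files: the keycloak_user_federation module deleted above wraps every non-string `config` value into a single-element list of strings, because the Keycloak Admin API expects config parameters in exactly that shape; sanitize() later unwraps those lists again for display. The sample values below are invented for illustration.)

    # Sketch of the normalization performed by the removed module; booleans and
    # numbers are stringified and lower-cased, None values are dropped.
    def normalize_config(config):
        return dict((k, [v if isinstance(v, str) else str(v).lower()])
                    for k, v in config.items() if v is not None)

    print(normalize_config({'enabled': True, 'priority': 0, 'bindDn': None}))
    # -> {'enabled': ['true'], 'priority': ['0']}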
- - Using the C(no_log) option is recommended to avoid logging the values of the secrets being retrieved. - - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)! - You must now use the C(register) option to use the facts in other tasks. -options: - search_terms: - type: list - elements: dict - description: - - A list of one or more search terms. - - Each search term can either be a simple string or it can be a dictionary for more control. - - When passing a simple string, I(field) is assumed to be C(password). - - When passing a dictionary, the following fields are available. - suboptions: - name: - type: str - description: - - The name of the 1Password item to search for (required). - field: - type: str - description: - - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment). - section: - type: str - description: - - The name of a section within this item containing the specified field (optional, will search all sections if not specified). - vault: - type: str - description: - - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional). - required: True - auto_login: - type: dict - description: - - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info) - will attempt to sign in to 1Password automatically. - - Without this option, you must have already logged in via the 1Password CLI before running Ansible. - - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt - the Ansible Vault is equal to or greater in strength than the 1Password master password. - suboptions: - subdomain: - type: str - description: - - 1Password subdomain name (.1password.com). - - If this is not specified, the most recent subdomain will be used. - username: - type: str - description: - - 1Password username. - - Only required for initial sign in. - master_password: - type: str - description: - - The master password for your subdomain. - - This is always required when specifying C(auto_login). - required: True - secret_key: - type: str - description: - - The secret key for your subdomain. - - Only required for initial sign in. - default: {} - required: False - cli_path: - type: path - description: Used to specify the exact path to the C(op) command line interface. - required: False - default: 'op' -''' - -EXAMPLES = ''' -# Gather secrets from 1Password, assuming there is a 'password' field: -- name: Get a password - community.general.onepassword_info: - search_terms: My 1Password item - delegate_to: localhost - register: my_1password_item - no_log: true # Don't want to log the secrets to the console! - -# Gather secrets from 1Password, with more advanced search terms: -- name: Get a password - community.general.onepassword_info: - search_terms: - - name: My 1Password item - field: Custom field name # optional, defaults to 'password' - section: Custom section name # optional, defaults to 'None' - vault: Name of the vault # optional, only necessary if there is more than 1 Vault available - delegate_to: localhost - register: my_1password_item - no_log: True # Don't want to log the secrets to the console! - -# Gather secrets combining simple and advanced search terms to retrieve two items, for one of which we fetch two -# fields. 
In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the -# second, 'Custom field name' is fetched, as that is specified explicitly. -- name: Get a password - community.general.onepassword_info: - search_terms: - - My 1Password item # 'name' is optional when passing a simple string... - - name: My Other 1Password item # ...but it can also be set for consistency - - name: My 1Password item - field: Custom field name # optional, defaults to 'password' - section: Custom section name # optional, defaults to 'None' - vault: Name of the vault # optional, only necessary if there is more than 1 Vault available - - name: A 1Password item with document attachment - delegate_to: localhost - register: my_1password_item - no_log: true # Don't want to log the secrets to the console! - -- name: Debug a password (for example) - ansible.builtin.debug: - msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}" -''' - -RETURN = ''' ---- -# One or more dictionaries for each matching item from 1Password, along with the appropriate fields. -# This shows the response you would expect to receive from the third example documented above. -onepassword: - description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above. - returned: success - type: dict - sample: - "My 1Password item": - password: the value of this field - Custom field name: the value of this field - "My Other 1Password item": - password: the value of this field - "A 1Password item with document attachment": - document: the contents of the document attached to this item -''' - - -import errno -import json -import os -import re - -from subprocess import Popen, PIPE - -from ansible.module_utils.common.text.converters import to_bytes, to_native -from ansible.module_utils.basic import AnsibleModule - - -class AnsibleModuleError(Exception): - def __init__(self, results): - self.results = results - - def __repr__(self): - return self.results - - -class OnePasswordInfo(object): - - def __init__(self): - self.cli_path = module.params.get('cli_path') - self.config_file_path = '~/.op/config' - self.auto_login = module.params.get('auto_login') - self.logged_in = False - self.token = None - - terms = module.params.get('search_terms') - self.terms = self.parse_search_terms(terms) - - def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): - if self.token: - # Adds the session token to all commands if we're logged in. - args += [to_bytes('--session=') + self.token] - - command = [self.cli_path] + args - p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) - out, err = p.communicate(input=command_input) - rc = p.wait() - if not ignore_errors and rc != expected_rc: - raise AnsibleModuleError(to_native(err)) - return rc, out, err - - def _parse_field(self, data_json, item_id, field_name, section_title=None): - data = json.loads(data_json) - - if ('documentAttributes' in data['details']): - # This is actually a document, let's fetch the document data instead! - document = self._run(["get", "document", data['overview']['title']]) - return {'document': document[1].strip()} - - else: - # This is not a document, let's try to find the requested field - - # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute, - # not inside it, so we need to check there first. 
- if (field_name in data['details']): - return {field_name: data['details'][field_name]} - - # Otherwise we continue looking inside the 'fields' attribute for the specified field. - else: - if section_title is None: - for field_data in data['details'].get('fields', []): - if field_data.get('name', '').lower() == field_name.lower(): - return {field_name: field_data.get('value', '')} - - # We have not found it yet, so now let's see if there are any sections defined - # and search through those for the field. If a section was given, we skip - # any non-matching sections, otherwise we search them all until we find the field. - for section_data in data['details'].get('sections', []): - if section_title is not None and section_title.lower() != section_data['title'].lower(): - continue - for field_data in section_data.get('fields', []): - if field_data.get('t', '').lower() == field_name.lower(): - return {field_name: field_data.get('v', '')} - - # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded. - optional_section_title = '' if section_title is None else " in the section '%s'" % section_title - module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title)) - - def parse_search_terms(self, terms): - processed_terms = [] - - for term in terms: - if not isinstance(term, dict): - term = {'name': term} - - if 'name' not in term: - module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term)) - - term['field'] = term.get('field', 'password') - term['section'] = term.get('section', None) - term['vault'] = term.get('vault', None) - - processed_terms.append(term) - - return processed_terms - - def get_raw(self, item_id, vault=None): - try: - args = ["get", "item", item_id] - if vault is not None: - args += ['--vault={0}'.format(vault)] - rc, output, dummy = self._run(args) - return output - - except Exception as e: - if re.search(".*not found.*", to_native(e)): - module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id) - else: - module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e))) - - def get_field(self, item_id, field, section=None, vault=None): - output = self.get_raw(item_id, vault) - return self._parse_field(output, item_id, field, section) if output != '' else '' - - def full_login(self): - if self.auto_login is not None: - if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'), - self.auto_login.get('secret_key'), self.auto_login.get('master_password')]: - module.fail_json(msg='Unable to perform initial sign in to 1Password. ' - 'subdomain, username, secret_key, and master_password are required to perform initial sign in.') - - args = [ - 'signin', - '{0}.1password.com'.format(self.auto_login['subdomain']), - to_bytes(self.auto_login['username']), - to_bytes(self.auto_login['secret_key']), - '--output=raw', - ] - - try: - rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password'])) - self.token = out.strip() - except AnsibleModuleError as e: - module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e)) - else: - module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' " - "or define credentials in 'auto_login'. See the module documentation for details." 
% self.cli_path) - - def get_token(self): - # If the config file exists, assume an initial signin has taken place and try basic sign in - if os.path.isfile(self.config_file_path): - - if self.auto_login is not None: - - # Since we are not currently signed in, master_password is required at a minimum - if not self.auto_login.get('master_password'): - module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.") - - # Try signing in using the master_password and a subdomain if one is provided - try: - args = ['signin', '--output=raw'] - - if self.auto_login.get('subdomain'): - args = ['signin', self.auto_login['subdomain'], '--output=raw'] - - rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password'])) - self.token = out.strip() - - except AnsibleModuleError: - self.full_login() - - else: - self.full_login() - - else: - # Attempt a full sign in since there appears to be no existing sign in - self.full_login() - - def assert_logged_in(self): - try: - rc, out, err = self._run(['get', 'account'], ignore_errors=True) - if rc == 0: - self.logged_in = True - if not self.logged_in: - self.get_token() - except OSError as e: - if e.errno == errno.ENOENT: - module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path) - raise e - - def run(self): - result = {} - - self.assert_logged_in() - - for term in self.terms: - value = self.get_field(term['name'], term['field'], term['section'], term['vault']) - - if term['name'] in result: - # If we already have a result for this key, we have to append this result dictionary - # to the existing one. This is only applicable when there is a single item - # in 1Password which has two different fields, and we want to retrieve both of them. - result[term['name']].update(value) - else: - # If this is the first result for this key, simply set it. - result[term['name']] = value - - return result - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - cli_path=dict(type='path', default='op'), - auto_login=dict(type='dict', options=dict( - subdomain=dict(type='str'), - username=dict(type='str'), - master_password=dict(required=True, type='str', no_log=True), - secret_key=dict(type='str', no_log=True), - ), default=None), - search_terms=dict(required=True, type='list', elements='dict'), - ), - supports_check_mode=True - ) - - results = {'onepassword': OnePasswordInfo().run()} - - module.exit_json(changed=False, **results) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py b/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py deleted file mode 100644 index be118a50..00000000 --- a/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl) -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: opendj_backendprop -short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command. -description: - - This module will update settings for OpenDJ with the command set-backend-prop. 
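(A condensed editorial map of the sign-in flow of the removed onepassword_info module above, given as descriptive comments; every command shown is one the module itself builds in assert_logged_in(), get_token(), or full_login().)

    # 1. `op get account` succeeds             -> already signed in, no token needed.
    # 2. ~/.op/config exists and auto_login    -> short sign-in: `op signin [subdomain]
    #    is given (master_password mandatory)     --output=raw` with the master password
    #                                             on stdin; on failure, fall back to 3.
    # 3. otherwise                             -> full sign-in: `op signin
    #    <subdomain>.1password.com <username> <secret_key> --output=raw` with the
    #    master password on stdin; fails unless all four auto_login credentials are set.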
- - It will first check via the get-backend-prop command whether the configuration needs to be applied. -author: - - Werner Dijkerman (@dj-wasabi) -options: - opendj_bindir: - description: - - The path to the bin directory of OpenDJ. - required: false - default: /opt/opendj/bin - type: path - hostname: - description: - - The hostname of the OpenDJ server. - required: true - type: str - port: - description: - - The Admin port on which the OpenDJ instance is available. - required: true - type: str - username: - description: - - The username to connect as. - required: false - default: cn=Directory Manager - type: str - password: - description: - - The password for the cn=Directory Manager user. - - Either password or passwordfile is needed. - required: false - type: str - passwordfile: - description: - - Location of the password file which holds the password for the cn=Directory Manager user. - - Either password or passwordfile is needed. - required: false - type: path - backend: - description: - - The name of the backend on which the property needs to be updated. - required: true - type: str - name: - description: - - The configuration setting to update. - required: true - type: str - value: - description: - - The value for the configuration item. - required: true - type: str - state: - description: - - Whether the configuration needs to be added/updated. - required: false - default: "present" - type: str -''' - -EXAMPLES = ''' - - name: Add or update OpenDJ backend properties - action: opendj_backendprop - hostname=localhost - port=4444 - username="cn=Directory Manager" - password=password - backend=userRoot - name=index-entry-limit - value=5000 -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule - - -class BackendProp(object): - - def __init__(self, module): - self._module = module - - def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name): - my_command = [ - opendj_bindir + '/dsconfig', - 'get-backend-prop', - '-h', hostname, - '--port', str(port), - '--bindDN', username, - '--backend-name', backend_name, - '-n', '-X', '-s' - ] + password_method - rc, stdout, stderr = self._module.run_command(my_command) - if rc == 0: - return stdout - else: - self._module.fail_json(msg="Error message: " + str(stderr)) - - def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value): - my_command = [ - opendj_bindir + '/dsconfig', - 'set-backend-prop', - '-h', hostname, - '--port', str(port), - '--bindDN', username, - '--backend-name', backend_name, - '--set', name + ":" + value, - '-n', '-X' - ] + password_method - rc, stdout, stderr = self._module.run_command(my_command) - if rc == 0: - return True - else: - self._module.fail_json(msg="Error message: " + stderr) - - def validate_data(self, data=None, name=None, value=None): - for config_line in data.split('\n'): - if config_line: - split_line = config_line.split() - if split_line[0] == name: - if split_line[1] == value: - return True - return False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - opendj_bindir=dict(default="/opt/opendj/bin", type="path"), - hostname=dict(required=True), - port=dict(required=True), - username=dict(default="cn=Directory Manager", required=False), - password=dict(required=False, no_log=True), - passwordfile=dict(required=False, type="path"), - backend=dict(required=True), - name=dict(required=True), - value=dict(required=True), - state=dict(default="present"), - ), - supports_check_mode=True, 
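    # (Editorial note in comment form, since it sits inside the AnsibleModule(...)
    # call of the removed opendj_backendprop module.) The module is idempotent:
    # get_property() runs
    #   dsconfig get-backend-prop -h <host> --port <port> --bindDN <dn> --backend-name <backend> -n -X -s
    # validate_data() then scans each output line for the requested name/value pair,
    # and set-backend-prop is only invoked when the value differs; in check mode
    # the module exits right after the comparison, reporting changed=True.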
mutually_exclusive=[['password', 'passwordfile']], - required_one_of=[['password', 'passwordfile']] - ) - - opendj_bindir = module.params['opendj_bindir'] - hostname = module.params['hostname'] - port = module.params['port'] - username = module.params['username'] - password = module.params['password'] - passwordfile = module.params['passwordfile'] - backend_name = module.params['backend'] - name = module.params['name'] - value = module.params['value'] - state = module.params['state'] - - if module.params["password"] is not None: - password_method = ['-w', password] - elif module.params["passwordfile"] is not None: - password_method = ['-j', passwordfile] - - opendj = BackendProp(module) - validate = opendj.get_property(opendj_bindir=opendj_bindir, - hostname=hostname, - port=port, - username=username, - password_method=password_method, - backend_name=backend_name) - - if validate: - if not opendj.validate_data(data=validate, name=name, value=value): - if module.check_mode: - module.exit_json(changed=True) - if opendj.set_property(opendj_bindir=opendj_bindir, - hostname=hostname, - port=port, - username=username, - password_method=password_method, - backend_name=backend_name, - name=name, - value=value): - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - else: - module.exit_json(changed=False) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py deleted file mode 120000 index 9b2bf122..00000000 --- a/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/idrac_redfish_command.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py deleted file mode 120000 index ca2f32f0..00000000 --- a/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/idrac_redfish_config.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py deleted file mode 120000 index 7048d65f..00000000 --- a/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/idrac_redfish_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py deleted file mode 120000 index b1846d51..00000000 --- a/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/ilo_redfish_config.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py deleted file mode 120000 index 45790c3a..00000000 --- a/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/ilo_redfish_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/imc_rest.py 
b/ansible_collections/community/general/plugins/modules/imc_rest.py deleted file mode 120000 index 56347f57..00000000 --- a/ansible_collections/community/general/plugins/modules/imc_rest.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/imc/imc_rest.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/imgadm.py b/ansible_collections/community/general/plugins/modules/imgadm.py deleted file mode 120000 index 9b4ec40e..00000000 --- a/ansible_collections/community/general/plugins/modules/imgadm.py +++ /dev/null @@ -1 +0,0 @@ -cloud/smartos/imgadm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/infinity.py b/ansible_collections/community/general/plugins/modules/infinity.py deleted file mode 120000 index 85dae982..00000000 --- a/ansible_collections/community/general/plugins/modules/infinity.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/infinity/infinity.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/influxdb_database.py b/ansible_collections/community/general/plugins/modules/influxdb_database.py deleted file mode 120000 index 8a99acd4..00000000 --- a/ansible_collections/community/general/plugins/modules/influxdb_database.py +++ /dev/null @@ -1 +0,0 @@ -database/influxdb/influxdb_database.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/influxdb_query.py b/ansible_collections/community/general/plugins/modules/influxdb_query.py deleted file mode 120000 index a421edaf..00000000 --- a/ansible_collections/community/general/plugins/modules/influxdb_query.py +++ /dev/null @@ -1 +0,0 @@ -database/influxdb/influxdb_query.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py b/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py deleted file mode 120000 index e143a402..00000000 --- a/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py +++ /dev/null @@ -1 +0,0 @@ -database/influxdb/influxdb_retention_policy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/influxdb_user.py b/ansible_collections/community/general/plugins/modules/influxdb_user.py deleted file mode 120000 index 76fbf6f3..00000000 --- a/ansible_collections/community/general/plugins/modules/influxdb_user.py +++ /dev/null @@ -1 +0,0 @@ -database/influxdb/influxdb_user.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/influxdb_write.py b/ansible_collections/community/general/plugins/modules/influxdb_write.py deleted file mode 120000 index c7fa7280..00000000 --- a/ansible_collections/community/general/plugins/modules/influxdb_write.py +++ /dev/null @@ -1 +0,0 @@ -database/influxdb/influxdb_write.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ini_file.py b/ansible_collections/community/general/plugins/modules/ini_file.py deleted file mode 120000 index 1a30612d..00000000 --- a/ansible_collections/community/general/plugins/modules/ini_file.py +++ /dev/null @@ -1 +0,0 @@ -files/ini_file.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/installp.py b/ansible_collections/community/general/plugins/modules/installp.py deleted file mode 120000 index b587b63f..00000000 --- a/ansible_collections/community/general/plugins/modules/installp.py +++ 
/dev/null @@ -1 +0,0 @@ -packaging/os/installp.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/interfaces_file.py b/ansible_collections/community/general/plugins/modules/interfaces_file.py deleted file mode 120000 index 8dddd496..00000000 --- a/ansible_collections/community/general/plugins/modules/interfaces_file.py +++ /dev/null @@ -1 +0,0 @@ -system/interfaces_file.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ip_netns.py b/ansible_collections/community/general/plugins/modules/ip_netns.py deleted file mode 120000 index f936b1b2..00000000 --- a/ansible_collections/community/general/plugins/modules/ip_netns.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/ip_netns.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_config.py b/ansible_collections/community/general/plugins/modules/ipa_config.py deleted file mode 120000 index aea644cc..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_config.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_config.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py b/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py deleted file mode 120000 index 048da876..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_dnsrecord.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_dnszone.py b/ansible_collections/community/general/plugins/modules/ipa_dnszone.py deleted file mode 120000 index 96e639a7..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_dnszone.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_dnszone.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_group.py b/ansible_collections/community/general/plugins/modules/ipa_group.py deleted file mode 120000 index 0188e1ee..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_group.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_group.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py b/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py deleted file mode 120000 index d6012e48..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_hbacrule.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_host.py b/ansible_collections/community/general/plugins/modules/ipa_host.py deleted file mode 120000 index 2955e714..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_host.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_host.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py b/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py deleted file mode 120000 index ffa70e83..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_hostgroup.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py b/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py deleted file mode 120000 index 20b2c501..00000000 --- 
a/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_otpconfig.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_otptoken.py b/ansible_collections/community/general/plugins/modules/ipa_otptoken.py deleted file mode 120000 index c56575c8..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_otptoken.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_otptoken.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py b/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py deleted file mode 120000 index da5bb94f..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_pwpolicy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_role.py b/ansible_collections/community/general/plugins/modules/ipa_role.py deleted file mode 120000 index c6ad5b7a..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_role.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_role.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_service.py b/ansible_collections/community/general/plugins/modules/ipa_service.py deleted file mode 120000 index f72a8175..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_service.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_service.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_subca.py b/ansible_collections/community/general/plugins/modules/ipa_subca.py deleted file mode 120000 index a6402b50..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_subca.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_subca.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py b/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py deleted file mode 120000 index d855cf85..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_sudocmd.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py b/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py deleted file mode 120000 index 09219dbb..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_sudocmdgroup.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudorule.py b/ansible_collections/community/general/plugins/modules/ipa_sudorule.py deleted file mode 120000 index c7a6b0c9..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_sudorule.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_sudorule.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_user.py b/ansible_collections/community/general/plugins/modules/ipa_user.py deleted file mode 120000 index 2b5598af..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_user.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_user.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipa_vault.py 
b/ansible_collections/community/general/plugins/modules/ipa_vault.py deleted file mode 120000 index 82aae7a3..00000000 --- a/ansible_collections/community/general/plugins/modules/ipa_vault.py +++ /dev/null @@ -1 +0,0 @@ -identity/ipa/ipa_vault.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipify_facts.py b/ansible_collections/community/general/plugins/modules/ipify_facts.py deleted file mode 120000 index b657e999..00000000 --- a/ansible_collections/community/general/plugins/modules/ipify_facts.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/ipify_facts.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py b/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py deleted file mode 120000 index b671e3e8..00000000 --- a/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/ipinfoio_facts.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipmi_boot.py b/ansible_collections/community/general/plugins/modules/ipmi_boot.py deleted file mode 120000 index 27b61b34..00000000 --- a/ansible_collections/community/general/plugins/modules/ipmi_boot.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/ipmi/ipmi_boot.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipmi_power.py b/ansible_collections/community/general/plugins/modules/ipmi_power.py deleted file mode 120000 index 94bb9c64..00000000 --- a/ansible_collections/community/general/plugins/modules/ipmi_power.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/ipmi/ipmi_power.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/iptables_state.py b/ansible_collections/community/general/plugins/modules/iptables_state.py deleted file mode 120000 index 864608d5..00000000 --- a/ansible_collections/community/general/plugins/modules/iptables_state.py +++ /dev/null @@ -1 +0,0 @@ -system/iptables_state.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ipwcli_dns.py b/ansible_collections/community/general/plugins/modules/ipwcli_dns.py deleted file mode 120000 index 6e5ac859..00000000 --- a/ansible_collections/community/general/plugins/modules/ipwcli_dns.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/ipwcli_dns.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/irc.py b/ansible_collections/community/general/plugins/modules/irc.py deleted file mode 120000 index 76921386..00000000 --- a/ansible_collections/community/general/plugins/modules/irc.py +++ /dev/null @@ -1 +0,0 @@ -notification/irc.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/iso_create.py b/ansible_collections/community/general/plugins/modules/iso_create.py deleted file mode 120000 index b47934b3..00000000 --- a/ansible_collections/community/general/plugins/modules/iso_create.py +++ /dev/null @@ -1 +0,0 @@ -files/iso_create.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/iso_extract.py b/ansible_collections/community/general/plugins/modules/iso_extract.py deleted file mode 120000 index 4bcbd351..00000000 --- a/ansible_collections/community/general/plugins/modules/iso_extract.py +++ /dev/null @@ -1 +0,0 @@ -files/iso_extract.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/jabber.py b/ansible_collections/community/general/plugins/modules/jabber.py deleted file mode 120000 index d25bac2f..00000000 --- a/ansible_collections/community/general/plugins/modules/jabber.py +++ /dev/null @@ -1 +0,0 @@ -notification/jabber.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/java_cert.py b/ansible_collections/community/general/plugins/modules/java_cert.py deleted file mode 120000 index 09d50620..00000000 --- a/ansible_collections/community/general/plugins/modules/java_cert.py +++ /dev/null @@ -1 +0,0 @@ -system/java_cert.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/java_keystore.py b/ansible_collections/community/general/plugins/modules/java_keystore.py deleted file mode 120000 index 1c53619e..00000000 --- a/ansible_collections/community/general/plugins/modules/java_keystore.py +++ /dev/null @@ -1 +0,0 @@ -system/java_keystore.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/jboss.py b/ansible_collections/community/general/plugins/modules/jboss.py deleted file mode 120000 index 3bd10bc7..00000000 --- a/ansible_collections/community/general/plugins/modules/jboss.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/jboss.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/jenkins_build.py b/ansible_collections/community/general/plugins/modules/jenkins_build.py deleted file mode 120000 index e26f4fad..00000000 --- a/ansible_collections/community/general/plugins/modules/jenkins_build.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/jenkins_build.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/jenkins_job.py b/ansible_collections/community/general/plugins/modules/jenkins_job.py deleted file mode 120000 index aa3468ee..00000000 --- a/ansible_collections/community/general/plugins/modules/jenkins_job.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/jenkins_job.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/jenkins_job_info.py b/ansible_collections/community/general/plugins/modules/jenkins_job_info.py deleted file mode 120000 index 4172732d..00000000 --- a/ansible_collections/community/general/plugins/modules/jenkins_job_info.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/jenkins_job_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/jenkins_plugin.py b/ansible_collections/community/general/plugins/modules/jenkins_plugin.py deleted file mode 120000 index 5b665bf7..00000000 --- a/ansible_collections/community/general/plugins/modules/jenkins_plugin.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/jenkins_plugin.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/jenkins_script.py b/ansible_collections/community/general/plugins/modules/jenkins_script.py deleted file mode 120000 index cba6534a..00000000 --- a/ansible_collections/community/general/plugins/modules/jenkins_script.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/jenkins_script.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/jira.py b/ansible_collections/community/general/plugins/modules/jira.py deleted file mode 120000 index 35d18694..00000000 --- a/ansible_collections/community/general/plugins/modules/jira.py +++ 
/dev/null @@ -1 +0,0 @@ -web_infrastructure/jira.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/kernel_blacklist.py b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py deleted file mode 120000 index bbb9aaae..00000000 --- a/ansible_collections/community/general/plugins/modules/kernel_blacklist.py +++ /dev/null @@ -1 +0,0 @@ -system/kernel_blacklist.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authentication.py b/ansible_collections/community/general/plugins/modules/keycloak_authentication.py deleted file mode 120000 index 63ef9d37..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_authentication.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_authentication.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client.py b/ansible_collections/community/general/plugins/modules/keycloak_client.py deleted file mode 120000 index 2a016515..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_client.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_client.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py b/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py deleted file mode 120000 index 02243ca6..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_client_rolemapping.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py b/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py deleted file mode 120000 index 01468a5c..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_clientscope.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py b/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py deleted file mode 120000 index a861fcd5..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_clienttemplate.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_group.py b/ansible_collections/community/general/plugins/modules/keycloak_group.py deleted file mode 120000 index 5ce7bf06..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_group.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_group.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py deleted file mode 120000 index 14ef2a9b..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_identity_provider.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm.py b/ansible_collections/community/general/plugins/modules/keycloak_realm.py deleted file mode 120000 index e0e4155f..00000000 --- 
a/ansible_collections/community/general/plugins/modules/keycloak_realm.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_realm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py b/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py deleted file mode 120000 index 9d2b0a2a..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_realm_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_role.py b/ansible_collections/community/general/plugins/modules/keycloak_role.py deleted file mode 120000 index 8e6477ce..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_role.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_role.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py deleted file mode 120000 index e996a5c8..00000000 --- a/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py +++ /dev/null @@ -1 +0,0 @@ -identity/keycloak/keycloak_user_federation.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/kibana_plugin.py b/ansible_collections/community/general/plugins/modules/kibana_plugin.py deleted file mode 120000 index bb8896c2..00000000 --- a/ansible_collections/community/general/plugins/modules/kibana_plugin.py +++ /dev/null @@ -1 +0,0 @@ -database/misc/kibana_plugin.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/launchd.py b/ansible_collections/community/general/plugins/modules/launchd.py deleted file mode 120000 index d88c77b6..00000000 --- a/ansible_collections/community/general/plugins/modules/launchd.py +++ /dev/null @@ -1 +0,0 @@ -system/launchd.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/layman.py b/ansible_collections/community/general/plugins/modules/layman.py deleted file mode 120000 index c7bc59f3..00000000 --- a/ansible_collections/community/general/plugins/modules/layman.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/layman.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lbu.py b/ansible_collections/community/general/plugins/modules/lbu.py deleted file mode 120000 index e4ee0cf9..00000000 --- a/ansible_collections/community/general/plugins/modules/lbu.py +++ /dev/null @@ -1 +0,0 @@ -system/lbu.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ldap_attrs.py b/ansible_collections/community/general/plugins/modules/ldap_attrs.py deleted file mode 120000 index 267942a5..00000000 --- a/ansible_collections/community/general/plugins/modules/ldap_attrs.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/ldap/ldap_attrs.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ldap_entry.py b/ansible_collections/community/general/plugins/modules/ldap_entry.py deleted file mode 120000 index 6dc01914..00000000 --- a/ansible_collections/community/general/plugins/modules/ldap_entry.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/ldap/ldap_entry.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ldap_passwd.py 
b/ansible_collections/community/general/plugins/modules/ldap_passwd.py deleted file mode 120000 index b9d4a2f8..00000000 --- a/ansible_collections/community/general/plugins/modules/ldap_passwd.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/ldap/ldap_passwd.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ldap_search.py b/ansible_collections/community/general/plugins/modules/ldap_search.py deleted file mode 120000 index 59db7a2b..00000000 --- a/ansible_collections/community/general/plugins/modules/ldap_search.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/ldap/ldap_search.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/librato_annotation.py b/ansible_collections/community/general/plugins/modules/librato_annotation.py deleted file mode 120000 index 6a7a7d7e..00000000 --- a/ansible_collections/community/general/plugins/modules/librato_annotation.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/librato_annotation.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/linode.py b/ansible_collections/community/general/plugins/modules/linode.py deleted file mode 120000 index 552b7ce8..00000000 --- a/ansible_collections/community/general/plugins/modules/linode.py +++ /dev/null @@ -1 +0,0 @@ -cloud/linode/linode.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/linode_v4.py b/ansible_collections/community/general/plugins/modules/linode_v4.py deleted file mode 120000 index 5475546a..00000000 --- a/ansible_collections/community/general/plugins/modules/linode_v4.py +++ /dev/null @@ -1 +0,0 @@ -cloud/linode/linode_v4.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/listen_ports_facts.py b/ansible_collections/community/general/plugins/modules/listen_ports_facts.py deleted file mode 120000 index e197ccd7..00000000 --- a/ansible_collections/community/general/plugins/modules/listen_ports_facts.py +++ /dev/null @@ -1 +0,0 @@ -system/listen_ports_facts.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lldp.py b/ansible_collections/community/general/plugins/modules/lldp.py deleted file mode 120000 index b7946760..00000000 --- a/ansible_collections/community/general/plugins/modules/lldp.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/lldp.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/locale_gen.py b/ansible_collections/community/general/plugins/modules/locale_gen.py deleted file mode 120000 index b5f6330e..00000000 --- a/ansible_collections/community/general/plugins/modules/locale_gen.py +++ /dev/null @@ -1 +0,0 @@ -system/locale_gen.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/logentries.py b/ansible_collections/community/general/plugins/modules/logentries.py deleted file mode 120000 index db4290e3..00000000 --- a/ansible_collections/community/general/plugins/modules/logentries.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/logentries.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/logentries_msg.py b/ansible_collections/community/general/plugins/modules/logentries_msg.py deleted file mode 120000 index 54e2ef5b..00000000 --- a/ansible_collections/community/general/plugins/modules/logentries_msg.py +++ /dev/null @@ -1 +0,0 @@ -notification/logentries_msg.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/logstash_plugin.py b/ansible_collections/community/general/plugins/modules/logstash_plugin.py deleted file mode 120000 index 2d1f1744..00000000 --- a/ansible_collections/community/general/plugins/modules/logstash_plugin.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/logstash_plugin.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lvg.py b/ansible_collections/community/general/plugins/modules/lvg.py deleted file mode 120000 index 93410ad9..00000000 --- a/ansible_collections/community/general/plugins/modules/lvg.py +++ /dev/null @@ -1 +0,0 @@ -system/lvg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lvol.py b/ansible_collections/community/general/plugins/modules/lvol.py deleted file mode 120000 index ee479751..00000000 --- a/ansible_collections/community/general/plugins/modules/lvol.py +++ /dev/null @@ -1 +0,0 @@ -system/lvol.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lxc_container.py b/ansible_collections/community/general/plugins/modules/lxc_container.py deleted file mode 120000 index b26b7e73..00000000 --- a/ansible_collections/community/general/plugins/modules/lxc_container.py +++ /dev/null @@ -1 +0,0 @@ -cloud/lxc/lxc_container.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lxca_cmms.py b/ansible_collections/community/general/plugins/modules/lxca_cmms.py deleted file mode 120000 index 8829ef50..00000000 --- a/ansible_collections/community/general/plugins/modules/lxca_cmms.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/lxca/lxca_cmms.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lxca_nodes.py b/ansible_collections/community/general/plugins/modules/lxca_nodes.py deleted file mode 120000 index e07787a2..00000000 --- a/ansible_collections/community/general/plugins/modules/lxca_nodes.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/lxca/lxca_nodes.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lxd_container.py b/ansible_collections/community/general/plugins/modules/lxd_container.py deleted file mode 120000 index 730ae859..00000000 --- a/ansible_collections/community/general/plugins/modules/lxd_container.py +++ /dev/null @@ -1 +0,0 @@ -cloud/lxd/lxd_container.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/lxd_profile.py b/ansible_collections/community/general/plugins/modules/lxd_profile.py deleted file mode 120000 index c88ceed2..00000000 --- a/ansible_collections/community/general/plugins/modules/lxd_profile.py +++ /dev/null @@ -1 +0,0 @@ -cloud/lxd/lxd_profile.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/macports.py b/ansible_collections/community/general/plugins/modules/macports.py deleted file mode 120000 index b96d9f44..00000000 --- a/ansible_collections/community/general/plugins/modules/macports.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/macports.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/mail.py b/ansible_collections/community/general/plugins/modules/mail.py deleted file mode 120000 index d86ab92e..00000000 --- a/ansible_collections/community/general/plugins/modules/mail.py +++ /dev/null @@ -1 +0,0 @@ -notification/mail.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/make.py b/ansible_collections/community/general/plugins/modules/make.py deleted file mode 120000 index c8433e44..00000000 --- a/ansible_collections/community/general/plugins/modules/make.py +++ /dev/null @@ -1 +0,0 @@ -system/make.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py b/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py deleted file mode 120000 index 309776dc..00000000 --- a/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/manageiq/manageiq_alert_profiles.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/manageiq_alerts.py b/ansible_collections/community/general/plugins/modules/manageiq_alerts.py deleted file mode 120000 index d44669ec..00000000 --- a/ansible_collections/community/general/plugins/modules/manageiq_alerts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/manageiq/manageiq_alerts.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/manageiq_group.py b/ansible_collections/community/general/plugins/modules/manageiq_group.py deleted file mode 120000 index 858b37b1..00000000 --- a/ansible_collections/community/general/plugins/modules/manageiq_group.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/manageiq/manageiq_group.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/manageiq_policies.py b/ansible_collections/community/general/plugins/modules/manageiq_policies.py deleted file mode 120000 index 3a0327af..00000000 --- a/ansible_collections/community/general/plugins/modules/manageiq_policies.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/manageiq/manageiq_policies.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/manageiq_provider.py b/ansible_collections/community/general/plugins/modules/manageiq_provider.py deleted file mode 120000 index 17397e60..00000000 --- a/ansible_collections/community/general/plugins/modules/manageiq_provider.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/manageiq/manageiq_provider.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tags.py b/ansible_collections/community/general/plugins/modules/manageiq_tags.py deleted file mode 120000 index d86d5988..00000000 --- a/ansible_collections/community/general/plugins/modules/manageiq_tags.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/manageiq/manageiq_tags.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tenant.py b/ansible_collections/community/general/plugins/modules/manageiq_tenant.py deleted file mode 120000 index 10c8823d..00000000 --- a/ansible_collections/community/general/plugins/modules/manageiq_tenant.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/manageiq/manageiq_tenant.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/manageiq_user.py b/ansible_collections/community/general/plugins/modules/manageiq_user.py deleted file mode 120000 index 4b5a8972..00000000 --- a/ansible_collections/community/general/plugins/modules/manageiq_user.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/manageiq/manageiq_user.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/mas.py b/ansible_collections/community/general/plugins/modules/mas.py deleted file mode 120000 index 4307f447..00000000 --- a/ansible_collections/community/general/plugins/modules/mas.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/mas.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/matrix.py b/ansible_collections/community/general/plugins/modules/matrix.py deleted file mode 120000 index 8f6f093f..00000000 --- a/ansible_collections/community/general/plugins/modules/matrix.py +++ /dev/null @@ -1 +0,0 @@ -notification/matrix.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/mattermost.py b/ansible_collections/community/general/plugins/modules/mattermost.py deleted file mode 120000 index 3918eb6b..00000000 --- a/ansible_collections/community/general/plugins/modules/mattermost.py +++ /dev/null @@ -1 +0,0 @@ -notification/mattermost.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/maven_artifact.py b/ansible_collections/community/general/plugins/modules/maven_artifact.py deleted file mode 120000 index 053e4217..00000000 --- a/ansible_collections/community/general/plugins/modules/maven_artifact.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/maven_artifact.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/memset_dns_reload.py b/ansible_collections/community/general/plugins/modules/memset_dns_reload.py deleted file mode 120000 index af1b549d..00000000 --- a/ansible_collections/community/general/plugins/modules/memset_dns_reload.py +++ /dev/null @@ -1 +0,0 @@ -cloud/memset/memset_dns_reload.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/memset_memstore_info.py b/ansible_collections/community/general/plugins/modules/memset_memstore_info.py deleted file mode 120000 index 1e6bde1a..00000000 --- a/ansible_collections/community/general/plugins/modules/memset_memstore_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/memset/memset_memstore_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/memset_server_info.py b/ansible_collections/community/general/plugins/modules/memset_server_info.py deleted file mode 120000 index cb8677c3..00000000 --- a/ansible_collections/community/general/plugins/modules/memset_server_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/memset/memset_server_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/memset_zone.py b/ansible_collections/community/general/plugins/modules/memset_zone.py deleted file mode 120000 index 10de8bb7..00000000 --- a/ansible_collections/community/general/plugins/modules/memset_zone.py +++ /dev/null @@ -1 +0,0 @@ -cloud/memset/memset_zone.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/memset_zone_domain.py b/ansible_collections/community/general/plugins/modules/memset_zone_domain.py deleted file mode 120000 index 2b931089..00000000 --- a/ansible_collections/community/general/plugins/modules/memset_zone_domain.py +++ /dev/null @@ -1 +0,0 @@ -cloud/memset/memset_zone_domain.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/memset_zone_record.py b/ansible_collections/community/general/plugins/modules/memset_zone_record.py deleted file mode 120000 index ee5eadf4..00000000 --- 
a/ansible_collections/community/general/plugins/modules/memset_zone_record.py +++ /dev/null @@ -1 +0,0 @@ -cloud/memset/memset_zone_record.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/mksysb.py b/ansible_collections/community/general/plugins/modules/mksysb.py deleted file mode 120000 index bae825ca..00000000 --- a/ansible_collections/community/general/plugins/modules/mksysb.py +++ /dev/null @@ -1 +0,0 @@ -system/mksysb.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/modprobe.py b/ansible_collections/community/general/plugins/modules/modprobe.py deleted file mode 120000 index e093a236..00000000 --- a/ansible_collections/community/general/plugins/modules/modprobe.py +++ /dev/null @@ -1 +0,0 @@ -system/modprobe.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/monit.py b/ansible_collections/community/general/plugins/modules/monit.py deleted file mode 120000 index b54af910..00000000 --- a/ansible_collections/community/general/plugins/modules/monit.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/monit.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py b/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py deleted file mode 100644 index a7d7710a..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Bruce Pennypacker -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: airbrake_deployment -author: -- "Bruce Pennypacker (@bpennypacker)" -- "Patrick Humpal (@phumpal)" -short_description: Notify airbrake about app deployments -description: - - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)). -options: - project_id: - description: - - Airbrake PROJECT_ID - required: true - type: str - version_added: '0.2.0' - project_key: - description: - - Airbrake PROJECT_KEY. - required: true - type: str - version_added: '0.2.0' - environment: - description: - - The airbrake environment name, typically 'production', 'staging', etc. - required: true - type: str - user: - description: - - The username of the person doing the deployment - required: false - type: str - repo: - description: - - URL of the project repository - required: false - type: str - revision: - description: - - A hash, number, tag, or other identifier showing what revision from version control was deployed - required: false - type: str - version: - description: - - A string identifying what version was deployed - required: false - type: str - version_added: '1.0.0' - url: - description: - - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. - required: false - default: "https://api.airbrake.io/api/v4/projects/" - type: str - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - type: bool - -requirements: [] -''' - -EXAMPLES = ''' -- name: Notify airbrake about an app deployment - community.general.airbrake_deployment: - project_id: '12345' - project_key: 'AAAAAA' - environment: staging - user: ansible - revision: '4.2' - -- name: Notify airbrake about an app deployment, using git hash as revision - community.general.airbrake_deployment: - project_id: '12345' - project_key: 'AAAAAA' - environment: staging - user: ansible - revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15' - version: '0.2.0' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - project_id=dict(required=True, no_log=True, type='str'), - project_key=dict(required=True, no_log=True, type='str'), - environment=dict(required=True, type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - version=dict(required=False, type='str'), - url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True, - ) - - # Build list of params - params = {} - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4 - if module.params["environment"]: - params["environment"] = module.params["environment"] - - if module.params["user"]: - params["username"] = module.params["user"] - - if module.params["repo"]: - params["repository"] = module.params["repo"] - - if module.params["revision"]: - params["revision"] = module.params["revision"] - - if module.params["version"]: - params["version"] = module.params["version"] - - # Build deploy url - url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"] - json_body = module.jsonify(params) - - # Build header - headers = {'Content-Type': 'application/json'} - - # Notify Airbrake of deploy - response, info = fetch_url(module, url, data=json_body, - headers=headers, method='POST') - - if info['status'] == 200 or info['status'] == 201: - module.exit_json(changed=True) - else: - module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py b/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py deleted file mode 100644 index c5fe61cb..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: bigpanda -author: "Hagai Kariti (@hkariti)" -short_description: Notify BigPanda about deployments -description: - - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. 
-options: - component: - type: str - description: - - "The name of the component being deployed. Ex: billing" - required: true - aliases: ['name'] - version: - type: str - description: - - The deployment version. - required: true - token: - type: str - description: - - API token. - required: true - state: - type: str - description: - - State of the deployment. - required: true - choices: ['started', 'finished', 'failed'] - hosts: - type: str - description: - - Name of the affected host. Can be a list. - - If not specified, it defaults to the remote system's hostname. - required: false - aliases: ['host'] - env: - type: str - description: - - The environment name, typically 'production', 'staging', etc. - required: false - owner: - type: str - description: - - The person responsible for the deployment. - required: false - description: - type: str - description: - - Free text description of the deployment. - required: false - url: - type: str - description: - - Base URL of the API server. - required: False - default: https://api.bigpanda.io - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - type: bool - deployment_message: - type: str - description: - - Message about the deployment. - version_added: '0.2.0' - source_system: - type: str - description: - - Source system used in the requests to the API. - default: ansible - -# informational: requirements for nodes -requirements: [ ] -''' - -EXAMPLES = ''' -- name: Notify BigPanda about a deployment - community.general.bigpanda: - component: myapp - version: '1.3' - token: '{{ bigpanda_token }}' - state: started - -- name: Notify BigPanda about a deployment - community.general.bigpanda: - component: myapp - version: '1.3' - token: '{{ bigpanda_token }}' - state: finished - -# If outside servers aren't reachable from your machine, use delegate_to and override hosts: -- name: Notify BigPanda about a deployment - community.general.bigpanda: - component: myapp - version: '1.3' - token: '{{ bigpanda_token }}' - hosts: '{{ ansible_hostname }}' - state: started - delegate_to: localhost - register: deployment - -- name: Notify BigPanda about a deployment - community.general.bigpanda: - component: '{{ deployment.component }}' - version: '{{ deployment.version }}' - token: '{{ deployment.token }}' - state: finished - delegate_to: localhost -''' - -# =========================================== -# Module execution.
-# -import json -import socket -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - component=dict(required=True, aliases=['name']), - version=dict(required=True), - token=dict(required=True, no_log=True), - state=dict(required=True, choices=['started', 'finished', 'failed']), - hosts=dict(required=False, aliases=['host']), - env=dict(required=False), - owner=dict(required=False), - description=dict(required=False), - deployment_message=dict(required=False), - source_system=dict(required=False, default='ansible'), - validate_certs=dict(default=True, type='bool'), - url=dict(required=False, default='https://api.bigpanda.io'), - ), - supports_check_mode=True, - ) - - token = module.params['token'] - state = module.params['state'] - url = module.params['url'] - - # Build the common request body - body = dict() - for k in ('component', 'version', 'hosts'): - v = module.params[k] - if v is not None: - body[k] = v - if body.get('hosts') is None: - body['hosts'] = [socket.gethostname()] - - if not isinstance(body['hosts'], list): - body['hosts'] = [body['hosts']] - - # Insert state-specific attributes to body - if state == 'started': - for k in ('source_system', 'env', 'owner', 'description'): - v = module.params[k] - if v is not None: - body[k] = v - - request_url = url + '/data/events/deployments/start' - else: - message = module.params['deployment_message'] - if message is not None: - body['errorMessage'] = message - - if state == 'finished': - body['status'] = 'success' - else: - body['status'] = 'failure' - - request_url = url + '/data/events/deployments/end' - - # Build the deployment object we return - deployment = dict(token=token, url=url) - deployment.update(body) - if 'errorMessage' in deployment: - message = deployment.pop('errorMessage') - deployment['message'] = message - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True, **deployment) - - # Send the data to bigpanda - data = json.dumps(body) - headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} - try: - response, info = fetch_url(module, request_url, data=data, headers=headers) - if info['status'] == 200: - module.exit_json(changed=True, **deployment) - else: - module.fail_json(msg=json.dumps(info)) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py b/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py deleted file mode 100644 index 40c7297d..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2014-2015, Epic Games, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: circonus_annotation -short_description: create an annotation in circonus -description: - - Create an annotation event with a given category, title and description. 
Optionally, start and end timestamps or a duration can be provided. -author: "Nick Harring (@NickatEpic)" -requirements: - - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2) -notes: - - Check mode isn't supported. -options: - api_key: - type: str - description: - - Circonus API key - required: true - category: - type: str - description: - - Annotation Category - required: true - description: - type: str - description: - - Description of annotation - required: true - title: - type: str - description: - - Title of annotation - required: true - start: - type: int - description: - - Unix timestamp of event start - - If not specified, it defaults to I(now). - stop: - type: int - description: - - Unix timestamp of event end - - If not specified, it defaults to I(now) + I(duration). - duration: - type: int - description: - - Duration in seconds of annotation - default: 0 -''' -EXAMPLES = ''' -- name: Create a simple annotation event with a source, defaults to start and end time of now - community.general.circonus_annotation: - api_key: XXXXXXXXXXXXXXXXX - title: App Config Change - description: This is a detailed description of the config change - category: This category groups like annotations - -- name: Create an annotation with a duration of 5 minutes and a default start time of now - community.general.circonus_annotation: - api_key: XXXXXXXXXXXXXXXXX - title: App Config Change - description: This is a detailed description of the config change - category: This category groups like annotations - duration: 300 - -- name: Create an annotation with an explicit start and stop timestamp - community.general.circonus_annotation: - api_key: XXXXXXXXXXXXXXXXX - title: App Config Change - description: This is a detailed description of the config change - category: This category groups like annotations - start: 1395940006 - stop: 1395954407 -''' - -RETURN = ''' -annotation: - description: details about the created annotation - returned: success - type: complex - contains: - _cid: - description: annotation identifier - returned: success - type: str - sample: /annotation/100000 - _created: - description: creation timestamp - returned: success - type: int - sample: 1502236928 - _last_modified: - description: last modification timestamp - returned: success - type: int - sample: 1502236928 - _last_modified_by: - description: last modified by - returned: success - type: str - sample: /user/1000 - category: - description: category of the created annotation - returned: success - type: str - sample: alerts - title: - description: title of the created annotation - returned: success - type: str - sample: WARNING - description: - description: description of the created annotation - returned: success - type: str - sample: Host is down. - start: - description: timestamp at which the annotation starts - returned: success - type: int - sample: 1502236928 - stop: - description: timestamp at which the annotation ends - returned: success - type: int - sample: 1502236928 - rel_metrics: - description: Array of metrics related to this annotation, each metric is a string.
- returned: success - type: list - sample: - - 54321_kbps -''' -import json -import time -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests - HAS_REQUESTS = True -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - HAS_REQUESTS = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six import PY3 -from ansible.module_utils.common.text.converters import to_native - - -def check_requests_dep(module): - """Check if an adequate requests version is available""" - if not HAS_REQUESTS: - module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - else: - required_version = '2.0.0' if PY3 else '1.0.0' - if LooseVersion(requests.__version__) < LooseVersion(required_version): - module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__)) - - -def post_annotation(annotation, api_key): - ''' Takes annotation dict and api_key string''' - base_url = 'https://api.circonus.com/v2' - anootate_post_endpoint = '/annotation' - resp = requests.post(base_url + anootate_post_endpoint, - headers=build_headers(api_key), data=json.dumps(annotation)) - resp.raise_for_status() - return resp - - -def create_annotation(module): - ''' Takes ansible module object ''' - annotation = {} - duration = module.params['duration'] - if module.params['start'] is not None: - start = module.params['start'] - else: - start = int(time.time()) - if module.params['stop'] is not None: - stop = module.params['stop'] - else: - stop = int(time.time()) + duration - annotation['start'] = start - annotation['stop'] = stop - annotation['category'] = module.params['category'] - annotation['description'] = module.params['description'] - annotation['title'] = module.params['title'] - return annotation - - -def build_headers(api_token): - '''Takes api token, returns headers with it included.''' - headers = {'X-Circonus-App-Name': 'ansible', - 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token, - 'Accept': 'application/json'} - return headers - - -def main(): - '''Main function, dispatches logic''' - module = AnsibleModule( - argument_spec=dict( - start=dict(type='int'), - stop=dict(type='int'), - category=dict(required=True), - title=dict(required=True), - description=dict(required=True), - duration=dict(default=0, type='int'), - api_key=dict(required=True, no_log=True) - ) - ) - - check_requests_dep(module) - - annotation = create_annotation(module) - try: - resp = post_annotation(annotation, module.params['api_key']) - except requests.exceptions.RequestException as e: - module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc()) - module.exit_json(changed=True, annotation=resp.json()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_downtime.py b/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_downtime.py deleted file mode 100644 index ef308bda..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_downtime.py +++ /dev/null @@ -1,308 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Datadog, Inc -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, 
print_function - -__metaclass__ = type - -DOCUMENTATION = """ ---- -module: datadog_downtime -short_description: Manages Datadog downtimes -version_added: 2.0.0 -description: - - Manages downtimes within Datadog. - - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/). -author: - - Datadog (@Datadog) -requirements: - - datadog-api-client - - Python 3.6+ -options: - api_key: - description: - - Your Datadog API key. - required: true - type: str - api_host: - description: - - The URL to the Datadog API. - - This value can also be set with the C(DATADOG_HOST) environment variable. - required: false - default: https://api.datadoghq.com - type: str - app_key: - description: - - Your Datadog app key. - required: true - type: str - state: - description: - - The designated state of the downtime. - required: false - choices: ["present", "absent"] - default: present - type: str - id: - description: - - The identifier of the downtime. - - If empty, a new downtime gets created, otherwise it is either updated or deleted depending on the C(state). - - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup. - type: int - monitor_tags: - description: - - A list of monitor tags to which the downtime applies. - - The resulting downtime applies to monitors that match ALL provided monitor tags. - type: list - elements: str - scope: - description: - - A list of scopes to which the downtime applies. - - The resulting downtime applies to sources that match ALL provided scopes. - type: list - elements: str - monitor_id: - description: - - The ID of the monitor to mute. If not provided, the downtime applies to all monitors. - type: int - downtime_message: - description: - - A message to include with notifications for this downtime. - - Email notifications can be sent to specific users by using the same "@username" notation as events. - type: str - start: - type: int - description: - - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created. - end: - type: int - description: - - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it. - timezone: - description: - - The timezone for the downtime. - type: str - rrule: - description: - - The C(RRULE) standard for defining recurring events. - - For example, to have a recurring event on the first day of each month, - select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1). - - Most common rrule options from the iCalendar Spec are supported. - - Attributes specifying the duration in C(RRULE) are not supported (e.g. C(DTSTART), C(DTEND), C(DURATION)). - type: str -""" - -EXAMPLES = """ - - name: Create a downtime - register: downtime_var - community.general.datadog_downtime: - state: present - monitor_tags: - - "foo:bar" - downtime_message: "Downtime for foo:bar" - scope: "test" - api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - # Lookup the id in the file and ignore errors if the file doesn't exist, so downtime gets created - id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}" - - name: Save downtime id to file for later updates and idempotence - delegate_to: localhost - copy: - content: "{{ downtime_var.downtime.id }}" - dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}" -""" - -RETURN = """ -# Returns the downtime JSON dictionary from the API response under the C(downtime) key.
-# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details. -downtime: - description: The downtime returned by the API. - type: dict - returned: always - sample: { - "active": true, - "canceled": null, - "creator_id": 1445416, - "disabled": false, - "downtime_type": 2, - "end": null, - "id": 1055751000, - "message": "Downtime for foo:bar", - "monitor_id": null, - "monitor_tags": [ - "foo:bar" - ], - "parent_id": null, - "recurrence": null, - "scope": [ - "test" - ], - "start": 1607015009, - "timezone": "UTC", - "updater_id": null - } -""" - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -# Import Datadog -from ansible.module_utils.common.text.converters import to_native - -DATADOG_IMP_ERR = None -HAS_DATADOG = True -try: - from datadog_api_client.v1 import Configuration, ApiClient, ApiException - from datadog_api_client.v1.api.downtimes_api import DowntimesApi - from datadog_api_client.v1.model.downtime import Downtime - from datadog_api_client.v1.model.downtime_recurrence import DowntimeRecurrence -except ImportError: - DATADOG_IMP_ERR = traceback.format_exc() - HAS_DATADOG = False - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, no_log=True), - api_host=dict(required=False, default="https://api.datadoghq.com"), - app_key=dict(required=True, no_log=True), - state=dict(required=False, choices=["present", "absent"], default="present"), - monitor_tags=dict(required=False, type="list", elements="str"), - scope=dict(required=False, type="list", elements="str"), - monitor_id=dict(required=False, type="int"), - downtime_message=dict(required=False, no_log=True), - start=dict(required=False, type="int"), - end=dict(required=False, type="int"), - timezone=dict(required=False, type="str"), - rrule=dict(required=False, type="str"), - id=dict(required=False, type="int"), - ) - ) - - # Prepare Datadog - if not HAS_DATADOG: - module.fail_json(msg=missing_required_lib("datadog-api-client"), exception=DATADOG_IMP_ERR) - - configuration = Configuration( - host=module.params["api_host"], - api_key={ - "apiKeyAuth": module.params["api_key"], - "appKeyAuth": module.params["app_key"] - } - ) - with ApiClient(configuration) as api_client: - api_client.user_agent = "ansible_collection/community_general (module_name datadog_downtime) {0}".format( - api_client.user_agent - ) - api_instance = DowntimesApi(api_client) - - # Validate api and app keys - try: - api_instance.list_downtimes(current_only=True) - except ApiException as e: - module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key: {0}".format(e)) - - if module.params["state"] == "present": - schedule_downtime(module, api_client) - elif module.params["state"] == "absent": - cancel_downtime(module, api_client) - - -def _get_downtime(module, api_client): - api = DowntimesApi(api_client) - downtime = None - if module.params["id"]: - try: - downtime = api.get_downtime(module.params["id"]) - except ApiException as e: - module.fail_json(msg="Failed to retrieve downtime with id {0}: {1}".format(module.params["id"], e)) - return downtime - - -def build_downtime(module): - downtime = Downtime() - if module.params["monitor_tags"]: - downtime.monitor_tags = module.params["monitor_tags"] - if module.params["scope"]: - downtime.scope = module.params["scope"] - if module.params["monitor_id"]: - downtime.monitor_id = module.params["monitor_id"] - if module.params["downtime_message"]: - downtime.message = 
module.params["downtime_message"] - if module.params["start"]: - downtime.start = module.params["start"] - if module.params["end"]: - downtime.end = module.params["end"] - if module.params["timezone"]: - downtime.timezone = module.params["timezone"] - if module.params["rrule"]: - downtime.recurrence = DowntimeRecurrence( - rrule=module.params["rrule"] - ) - return downtime - - -def _post_downtime(module, api_client): - api = DowntimesApi(api_client) - downtime = build_downtime(module) - try: - resp = api.create_downtime(downtime) - module.params["id"] = resp.id - module.exit_json(changed=True, downtime=resp.to_dict()) - except ApiException as e: - module.fail_json(msg="Failed to create downtime: {0}".format(e)) - - -def _equal_dicts(a, b, ignore_keys): - ka = set(a).difference(ignore_keys) - kb = set(b).difference(ignore_keys) - return ka == kb and all(a[k] == b[k] for k in ka) - - -def _update_downtime(module, current_downtime, api_client): - api = DowntimesApi(api_client) - downtime = build_downtime(module) - try: - if current_downtime.disabled: - resp = api.create_downtime(downtime) - else: - resp = api.update_downtime(module.params["id"], downtime) - if _equal_dicts( - resp.to_dict(), - current_downtime.to_dict(), - ["active", "creator_id", "updater_id"] - ): - module.exit_json(changed=False, downtime=resp.to_dict()) - else: - module.exit_json(changed=True, downtime=resp.to_dict()) - except ApiException as e: - module.fail_json(msg="Failed to update downtime: {0}".format(e)) - - -def schedule_downtime(module, api_client): - downtime = _get_downtime(module, api_client) - if downtime is None: - _post_downtime(module, api_client) - else: - _update_downtime(module, downtime, api_client) - - -def cancel_downtime(module, api_client): - downtime = _get_downtime(module, api_client) - api = DowntimesApi(api_client) - if downtime is None: - module.exit_json(changed=False) - try: - api.cancel_downtime(downtime["id"]) - except ApiException as e: - module.fail_json(msg="Failed to create downtime: {0}".format(e)) - - module.exit_json(changed=True) - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py b/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py deleted file mode 100644 index 6284b5bf..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Author: Artūras 'arturaz' Šlajus -# Author: Naoya Nakazawa -# -# This module is proudly sponsored by iGeolise (www.igeolise.com) and -# Tiny Lab Productions (www.tinylabproductions.com). -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: datadog_event -short_description: Posts events to Datadog service -description: -- "Allows to post events to Datadog (www.datadoghq.com) service." -- "Uses http://docs.datadoghq.com/api/#events API." 
-author: -- "Artūras `arturaz` Šlajus (@arturaz)" -- "Naoya Nakazawa (@n0ts)" -options: - api_key: - type: str - description: ["Your DataDog API key."] - required: true - app_key: - type: str - description: ["Your DataDog app key."] - required: true - title: - type: str - description: ["The event title."] - required: true - text: - type: str - description: ["The body of the event."] - required: true - date_happened: - type: int - description: - - POSIX timestamp of the event. - - Default value is now. - priority: - type: str - description: ["The priority of the event."] - default: normal - choices: [normal, low] - host: - type: str - description: - - Host name to associate with the event. - - If not specified, it defaults to the remote system's hostname. - api_host: - type: str - description: - - DataDog API endpoint URL. - version_added: '3.3.0' - tags: - type: list - elements: str - description: ["Comma separated list of tags to apply to the event."] - alert_type: - type: str - description: ["Type of alert."] - default: info - choices: ['error', 'warning', 'info', 'success'] - aggregation_key: - type: str - description: ["An arbitrary string to use for aggregation."] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -''' - -EXAMPLES = ''' -- name: Post an event with low priority - community.general.datadog_event: - title: Testing from ansible - text: Test - priority: low - api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 - app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN - -- name: Post an event with several tags - community.general.datadog_event: - title: Testing from ansible - text: Test - api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 - app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN - tags: 'aa,bb,#host:{{ inventory_hostname }}' - -- name: Post an event with several tags to another endpoint - community.general.datadog_event: - title: Testing from ansible - text: Test - api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 - app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN - api_host: 'https://example.datadoghq.eu' - tags: - - aa - - b - - '#host:{{ inventory_hostname }}' - -''' - -import platform -import traceback - -# Import Datadog -DATADOG_IMP_ERR = None -try: - from datadog import initialize, api - HAS_DATADOG = True -except Exception: - DATADOG_IMP_ERR = traceback.format_exc() - HAS_DATADOG = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, no_log=True), - app_key=dict(required=True, no_log=True), - api_host=dict(type='str'), - title=dict(required=True), - text=dict(required=True), - date_happened=dict(type='int'), - priority=dict(default='normal', choices=['normal', 'low']), - host=dict(), - tags=dict(type='list', elements='str'), - alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']), - aggregation_key=dict(no_log=False), - validate_certs=dict(default=True, type='bool'), - ) - ) - - # Prepare Datadog - if not HAS_DATADOG: - module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) - - options = { - 'api_key': module.params['api_key'], - 'app_key': module.params['app_key'], - } - if module.params['api_host'] is not None: - options['api_host'] = module.params['api_host'] - - initialize(**options) - 
- _post_event(module) - - -def _post_event(module): - try: - if module.params['host'] is None: - module.params['host'] = platform.node().split('.')[0] - msg = api.Event.create(title=module.params['title'], - text=module.params['text'], - host=module.params['host'], - tags=module.params['tags'], - priority=module.params['priority'], - alert_type=module.params['alert_type'], - aggregation_key=module.params['aggregation_key'], - source_type_name='ansible') - if msg['status'] != 'ok': - module.fail_json(msg=msg) - - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py b/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py deleted file mode 100644 index e9b225dc..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py +++ /dev/null @@ -1,418 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Sebastian Kornehl -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: datadog_monitor -short_description: Manages Datadog monitors -description: - - Manages monitors within Datadog. - - Options as described on https://docs.datadoghq.com/api/. -author: Sebastian Kornehl (@skornehl) -requirements: [datadog] -options: - api_key: - description: - - Your Datadog API key. - required: true - type: str - api_host: - description: - - The URL to the Datadog API. Default value is C(https://api.datadoghq.com). - - This value can also be set with the C(DATADOG_HOST) environment variable. - required: false - type: str - version_added: '0.2.0' - app_key: - description: - - Your Datadog app key. - required: true - type: str - state: - description: - - The designated state of the monitor. - required: true - choices: ['present', 'absent', 'mute', 'unmute'] - type: str - tags: - description: - - A list of tags to associate with your monitor when creating or updating. - - This can help you categorize and filter monitors. - type: list - elements: str - type: - description: - - The type of the monitor. - - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0. - - The type C(composite) was added in community.general 3.4.0. - choices: - - metric alert - - service check - - event alert - - process alert - - log alert - - query alert - - trace-analytics alert - - rum alert - - composite - type: str - query: - description: - - The monitor query to notify on. - - Syntax varies depending on what type of monitor you are creating. - type: str - name: - description: - - The name of the alert. - required: true - type: str - notification_message: - description: - - A message to include with notifications for this monitor. - - Email notifications can be sent to specific users by using the same '@username' notation as events. - - Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'. - type: str - silenced: - type: dict - description: - - Dictionary of scopes to silence, with timestamps or None. - - Each scope will be muted until the given POSIX timestamp or forever if the value is None. 
- notify_no_data: - description: - - Whether this monitor will notify when data stops reporting. - type: bool - default: 'no' - no_data_timeframe: - description: - - The number of minutes before a monitor will notify when data stops reporting. - - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. - - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service. - type: str - timeout_h: - description: - - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state. - type: str - renotify_interval: - description: - - The number of minutes after the last notification before a monitor will re-notify on the current status. - - It will only re-notify if it is not resolved. - type: str - escalation_message: - description: - - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. - - Not applicable if I(renotify_interval=None). - type: str - notify_audit: - description: - - Whether tagged users will be notified on changes to this monitor. - type: bool - default: 'no' - thresholds: - type: dict - description: - - A dictionary of thresholds by status. - - Only available for service checks and metric alerts. - - Because each of them can have multiple thresholds, we do not define them directly in the query. - - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})." - locked: - description: - - Whether changes to this monitor should be restricted to the creator or admins. - type: bool - default: 'no' - require_full_window: - description: - - Whether this monitor needs a full window of data before it gets evaluated. - - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped. - type: bool - new_host_delay: - description: - - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts. - - This gives the host time to fully initialize. - type: str - evaluation_delay: - description: - - Time to delay evaluation (in seconds). - - Effective for sparse values. - type: str - id: - description: - - The ID of the alert. - - If set, will be used instead of the name to locate the alert. - type: str - include_tags: - description: - - Whether notifications from this monitor automatically insert their triggering tags into the title. - type: bool - default: yes - version_added: 1.3.0 - priority: - description: - - Integer from 1 (high) to 5 (low) indicating alert severity. - type: int - version_added: 4.6.0 -''' - -EXAMPLES = ''' -- name: Create a metric monitor - community.general.datadog_monitor: - type: "metric alert" - name: "Test monitor" - state: "present" - query: "datadog.agent.up.over('host:host1').last(2).count_by_status()" - notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
- api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" - -- name: Deletes a monitor - community.general.datadog_monitor: - name: "Test monitor" - state: "absent" - api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" - -- name: Mutes a monitor - community.general.datadog_monitor: - name: "Test monitor" - state: "mute" - silenced: '{"*":None}' - api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" - -- name: Unmutes a monitor - community.general.datadog_monitor: - name: "Test monitor" - state: "unmute" - api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" - -- name: Use datadoghq.eu platform instead of datadoghq.com - community.general.datadog_monitor: - name: "Test monitor" - state: "absent" - api_host: https://api.datadoghq.eu - api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" - app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" -''' -import traceback - -# Import Datadog -DATADOG_IMP_ERR = None -try: - from datadog import initialize, api - HAS_DATADOG = True -except Exception: - DATADOG_IMP_ERR = traceback.format_exc() - HAS_DATADOG = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, no_log=True), - api_host=dict(), - app_key=dict(required=True, no_log=True), - state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), - type=dict(choices=['metric alert', 'service check', 'event alert', 'process alert', - 'log alert', 'query alert', 'trace-analytics alert', - 'rum alert', 'composite']), - name=dict(required=True), - query=dict(), - notification_message=dict(no_log=True), - silenced=dict(type='dict'), - notify_no_data=dict(default=False, type='bool'), - no_data_timeframe=dict(), - timeout_h=dict(), - renotify_interval=dict(), - escalation_message=dict(), - notify_audit=dict(default=False, type='bool'), - thresholds=dict(type='dict', default=None), - tags=dict(type='list', elements='str', default=None), - locked=dict(default=False, type='bool'), - require_full_window=dict(type='bool'), - new_host_delay=dict(), - evaluation_delay=dict(), - id=dict(), - include_tags=dict(required=False, default=True, type='bool'), - priority=dict(type='int'), - ) - ) - - # Prepare Datadog - if not HAS_DATADOG: - module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) - - options = { - 'api_key': module.params['api_key'], - 'api_host': module.params['api_host'], - 'app_key': module.params['app_key'] - } - - initialize(**options) - - # Check if api_key and app_key is correct or not - # if not, then fail here. 
- response = api.Monitor.get_all() - if isinstance(response, dict): - msg = response.get('errors', None) - if msg: - module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0])) - - if module.params['state'] == 'present': - install_monitor(module) - elif module.params['state'] == 'absent': - delete_monitor(module) - elif module.params['state'] == 'mute': - mute_monitor(module) - elif module.params['state'] == 'unmute': - unmute_monitor(module) - - -def _fix_template_vars(message): - if message: - return message.replace('[[', '{{').replace(']]', '}}') - return message - - -def _get_monitor(module): - if module.params['id'] is not None: - monitor = api.Monitor.get(module.params['id']) - if 'errors' in monitor: - module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors']))) - return monitor - else: - monitors = api.Monitor.get_all() - for monitor in monitors: - if monitor['name'] == _fix_template_vars(module.params['name']): - return monitor - return {} - - -def _post_monitor(module, options): - try: - kwargs = dict(type=module.params['type'], query=module.params['query'], - name=_fix_template_vars(module.params['name']), - message=_fix_template_vars(module.params['notification_message']), - escalation_message=_fix_template_vars(module.params['escalation_message']), - priority=module.params['priority'], - options=options) - if module.params['tags'] is not None: - kwargs['tags'] = module.params['tags'] - msg = api.Monitor.create(**kwargs) - if 'errors' in msg: - module.fail_json(msg=str(msg['errors'])) - else: - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -def _equal_dicts(a, b, ignore_keys): - ka = set(a).difference(ignore_keys) - kb = set(b).difference(ignore_keys) - return ka == kb and all(a[k] == b[k] for k in ka) - - -def _update_monitor(module, monitor, options): - try: - kwargs = dict(id=monitor['id'], query=module.params['query'], - name=_fix_template_vars(module.params['name']), - message=_fix_template_vars(module.params['notification_message']), - escalation_message=_fix_template_vars(module.params['escalation_message']), - priority=module.params['priority'], - options=options) - if module.params['tags'] is not None: - kwargs['tags'] = module.params['tags'] - msg = api.Monitor.update(**kwargs) - - if 'errors' in msg: - module.fail_json(msg=str(msg['errors'])) - elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']): - module.exit_json(changed=False, msg=msg) - else: - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -def install_monitor(module): - options = { - "silenced": module.params['silenced'], - "notify_no_data": module.boolean(module.params['notify_no_data']), - "no_data_timeframe": module.params['no_data_timeframe'], - "timeout_h": module.params['timeout_h'], - "renotify_interval": module.params['renotify_interval'], - "escalation_message": module.params['escalation_message'], - "notify_audit": module.boolean(module.params['notify_audit']), - "locked": module.boolean(module.params['locked']), - "require_full_window": module.params['require_full_window'], - "new_host_delay": module.params['new_host_delay'], - "evaluation_delay": module.params['evaluation_delay'], - "include_tags": module.params['include_tags'], - } - 
- if module.params['type'] == "service check": - options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1} - if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None: - options["thresholds"] = module.params['thresholds'] - - monitor = _get_monitor(module) - if not monitor: - _post_monitor(module, options) - else: - _update_monitor(module, monitor, options) - - -def delete_monitor(module): - monitor = _get_monitor(module) - if not monitor: - module.exit_json(changed=False) - try: - msg = api.Monitor.delete(monitor['id']) - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -def mute_monitor(module): - monitor = _get_monitor(module) - if not monitor: - module.fail_json(msg="Monitor %s not found!" % module.params['name']) - elif monitor['options']['silenced']: - module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") - elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0): - module.exit_json(changed=False) - try: - if module.params['silenced'] is None or module.params['silenced'] == "": - msg = api.Monitor.mute(id=monitor['id']) - else: - msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -def unmute_monitor(module): - monitor = _get_monitor(module) - if not monitor: - module.fail_json(msg="Monitor %s not found!" % module.params['name']) - elif not monitor['options']['silenced']: - module.exit_json(changed=False) - try: - msg = api.Monitor.unmute(monitor['id']) - module.exit_json(changed=True, msg=msg) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py b/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py deleted file mode 100644 index 2e2198e1..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2014 Benjamin Curtis -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: honeybadger_deployment -author: "Benjamin Curtis (@stympy)" -short_description: Notify Honeybadger.io about app deployments -description: - - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking) -options: - token: - type: str - description: - - API token. - required: true - environment: - type: str - description: - - The environment name, typically 'production', 'staging', etc. 
- required: true - user: - type: str - description: - - The username of the person doing the deployment - repo: - type: str - description: - - URL of the project repository - revision: - type: str - description: - - A hash, number, tag, or other identifier showing what revision was deployed - url: - type: str - description: - - Optional URL to submit the notification to. - default: "https://api.honeybadger.io/v1/deploys" - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - -''' - -EXAMPLES = ''' -- name: Notify Honeybadger.io about an app deployment - community.general.honeybadger_deployment: - token: AAAAAA - environment: staging - user: ansible - revision: b6826b8 - repo: 'git@github.com:user/repo.git' -''' - -RETURN = '''# ''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - environment=dict(required=True), - user=dict(required=False), - repo=dict(required=False), - revision=dict(required=False), - url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - params = {} - - if module.params["environment"]: - params["deploy[environment]"] = module.params["environment"] - - if module.params["user"]: - params["deploy[local_username]"] = module.params["user"] - - if module.params["repo"]: - params["deploy[repository]"] = module.params["repo"] - - if module.params["revision"]: - params["deploy[revision]"] = module.params["revision"] - - params["api_key"] = module.params["token"] - - url = module.params.get('url') - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - try: - data = urlencode(params) - response, info = fetch_url(module, url, data=data) - except Exception as e: - module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc()) - else: - if info['status'] == 201: - module.exit_json(changed=True) - else: - module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py b/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py deleted file mode 100644 index b59c0e11..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2016, Loic Blot -# Copyright (c) 2018, Ansible Project -# Sponsored by Infopro Digital. http://www.infopro-digital.com/ -# Sponsored by E.T.A.I. 
http://www.etai.fr/ -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: icinga2_feature - -short_description: Manage Icinga2 feature -description: - - This module can be used to enable or disable an Icinga2 feature. -author: "Loic Blot (@nerzhul)" -options: - name: - type: str - description: - - This is the feature name to enable or disable. - required: True - state: - type: str - description: - - If set to C(present) and feature is disabled, then feature is enabled. - - If set to C(present) and feature is already enabled, then nothing is changed. - - If set to C(absent) and feature is enabled, then feature is disabled. - - If set to C(absent) and feature is already disabled, then nothing is changed. - choices: [ "present", "absent" ] - default: present -''' - -EXAMPLES = ''' -- name: Enable ido-pgsql feature - community.general.icinga2_feature: - name: ido-pgsql - state: present - -- name: Disable api feature - community.general.icinga2_feature: - name: api - state: absent -''' - -RETURN = ''' -# -''' - -import re -from ansible.module_utils.basic import AnsibleModule - - -class Icinga2FeatureHelper: - def __init__(self, module): - self.module = module - self._icinga2 = module.get_bin_path('icinga2', True) - self.feature_name = self.module.params['name'] - self.state = self.module.params['state'] - - def _exec(self, args): - cmd = [self._icinga2, 'feature'] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return rc, out - - def manage(self): - rc, out = self._exec(["list"]) - if rc != 0: - self.module.fail_json(msg="Unable to list icinga2 features. " - "Ensure icinga2 is installed and present in binary path.") - - # If feature is already in good state, just exit - if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \ - (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"): - self.module.exit_json(changed=False) - - if self.module.check_mode: - self.module.exit_json(changed=True) - - feature_enable_str = "enable" if self.state == "present" else "disable" - - rc, out = self._exec([feature_enable_str, self.feature_name]) - - change_applied = False - if self.state == "present": - if rc != 0: - self.module.fail_json(msg="Failed to %s feature %s." - " icinga2 command returned %s" % (feature_enable_str, - self.feature_name, - out)) - - if re.search("already enabled", out) is None: - change_applied = True - else: - if rc == 0: - change_applied = True - # RC is not 0 for this already disabled feature, handle it as no change applied - elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out): - change_applied = False - else: - self.module.fail_json(msg="Failed to disable feature. 
Command returns %s" % out) - - self.module.exit_json(changed=change_applied) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', choices=["present", "absent"], default="present") - ), - supports_check_mode=True - ) - - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - Icinga2FeatureHelper(module).manage() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py b/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py deleted file mode 100644 index b4c4cdbc..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py +++ /dev/null @@ -1,332 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This module is proudly sponsored by CGI (www.cgi.com) and -# KPN (www.kpn.com). -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: icinga2_host -short_description: Manage a host in Icinga2 -description: - - "Add or remove a host to Icinga2 through the API." - - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)" -author: "Jurgen Brand (@t794104)" -options: - url: - type: str - description: - - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path - use_proxy: - description: - - If C(no), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - url_username: - type: str - description: - - The username for use in HTTP basic authentication. - - This parameter can be used without C(url_password) for sites that allow empty passwords. - url_password: - type: str - description: - - The password for use in HTTP basic authentication. - - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used. - force_basic_auth: - description: - - httplib2, the library used by the uri module only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. This option forces the sending of the Basic authentication header - upon initial request. - type: bool - default: 'no' - client_cert: - type: path - description: - - PEM formatted certificate chain file to be used for SSL client - authentication. This file can also include the key as well, and if - the key is included, C(client_key) is not required. - client_key: - type: path - description: - - PEM formatted file that contains your private key to be used for SSL - client authentication. If C(client_cert) contains both the certificate - and key, this option is not required. - state: - type: str - description: - - Apply feature state. - choices: [ "present", "absent" ] - default: present - name: - type: str - description: - - Name used to create / delete the host. This does not need to be the FQDN, but does needs to be unique. - required: true - aliases: [host] - zone: - type: str - description: - - The zone from where this host should be polled. 
- template: - type: str - description: - - The template used to define the host. - - Template cannot be modified after object creation. - check_command: - type: str - description: - - The command used to check if the host is alive. - default: "hostalive" - display_name: - type: str - description: - - The name used to display the host. - - If not specified, it defaults to the value of the I(name) parameter. - ip: - type: str - description: - - The IP address of the host. - required: true - variables: - type: dict - description: - - Dictionary of variables. -extends_documentation_fragment: - - url -''' - -EXAMPLES = ''' -- name: Add host to icinga - community.general.icinga2_host: - url: "https://icinga2.example.com" - url_username: "ansible" - url_password: "a_secret" - state: present - name: "{{ ansible_fqdn }}" - ip: "{{ ansible_default_ipv4.address }}" - variables: - foo: "bar" - delegate_to: 127.0.0.1 -''' - -RETURN = ''' -name: - description: The name used to create, modify or delete the host - type: str - returned: always -data: - description: The data structure used for create, modify or delete of the host - type: dict - returned: always -''' - -import json -import os - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url, url_argument_spec - - -# =========================================== -# Icinga2 API class -# -class icinga2_api: - module = None - - def __init__(self, module): - self.module = module - - def call_url(self, path, data='', method='GET'): - headers = { - 'Accept': 'application/json', - 'X-HTTP-Method-Override': method, - } - url = self.module.params.get("url") + "/" + path - rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy']) - body = '' - if rsp: - body = json.loads(rsp.read()) - if info['status'] >= 400: - body = info['body'] - return {'code': info['status'], 'data': body} - - def check_connection(self): - ret = self.call_url('v1/status') - if ret['code'] == 200: - return True - return False - - def exists(self, hostname): - data = { - "filter": "match(\"" + hostname + "\", host.name)", - } - ret = self.call_url( - path="v1/objects/hosts", - data=self.module.jsonify(data) - ) - if ret['code'] == 200: - if len(ret['data']['results']) == 1: - return True - return False - - def create(self, hostname, data): - ret = self.call_url( - path="v1/objects/hosts/" + hostname, - data=self.module.jsonify(data), - method="PUT" - ) - return ret - - def delete(self, hostname): - data = {"cascade": 1} - ret = self.call_url( - path="v1/objects/hosts/" + hostname, - data=self.module.jsonify(data), - method="DELETE" - ) - return ret - - def modify(self, hostname, data): - ret = self.call_url( - path="v1/objects/hosts/" + hostname, - data=self.module.jsonify(data), - method="POST" - ) - return ret - - def diff(self, hostname, data): - ret = self.call_url( - path="v1/objects/hosts/" + hostname, - method="GET" - ) - changed = False - ic_data = ret['data']['results'][0] - for key in data['attrs']: - if key not in ic_data['attrs'].keys(): - changed = True - elif data['attrs'][key] != ic_data['attrs'][key]: - changed = True - return changed - - -# =========================================== -# Module execution. 
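# A hedged sketch (illustrative, not part of the module) of how the
# icinga2_api wrapper above is driven; it mirrors main() below, with a
# hypothetical host name and address:
#
#   icinga = icinga2_api(module=module)
#   if not icinga.check_connection():
#       module.fail_json(msg="unable to connect to Icinga")
#   data = {'attrs': {'address': '192.0.2.10', 'check_command': 'hostalive'}}
#   if not icinga.exists('web01.example.com'):
#       icinga.create('web01.example.com', data)   # PUT v1/objects/hosts/web01.example.com
#   else:
#       icinga.modify('web01.example.com', data)   # POST, method set via X-HTTP-Method-Override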
-# -def main(): - # use the predefined argument spec for url - argument_spec = url_argument_spec() - # add our own arguments - argument_spec.update( - state=dict(default="present", choices=["absent", "present"]), - name=dict(required=True, aliases=['host']), - zone=dict(), - template=dict(default=None), - check_command=dict(default="hostalive"), - display_name=dict(default=None), - ip=dict(required=True), - variables=dict(type='dict', default=None), - ) - - # Define the main module - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - state = module.params["state"] - name = module.params["name"] - zone = module.params["zone"] - template = [name] - if module.params["template"]: - template.append(module.params["template"]) - check_command = module.params["check_command"] - ip = module.params["ip"] - display_name = module.params["display_name"] - if not display_name: - display_name = name - variables = module.params["variables"] - - try: - icinga = icinga2_api(module=module) - icinga.check_connection() - except Exception as e: - module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e)) - - data = { - 'attrs': { - 'address': ip, - 'display_name': display_name, - 'check_command': check_command, - 'zone': zone, - 'vars': { - 'made_by': "ansible", - }, - 'templates': template, - } - } - - if variables: - data['attrs']['vars'].update(variables) - - changed = False - if icinga.exists(name): - if state == "absent": - if module.check_mode: - module.exit_json(changed=True, name=name, data=data) - else: - try: - ret = icinga.delete(name) - if ret['code'] == 200: - changed = True - else: - module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data'])) - except Exception as e: - module.fail_json(msg="exception deleting host: " + str(e)) - - elif icinga.diff(name, data): - if module.check_mode: - module.exit_json(changed=False, name=name, data=data) - - # Template attribute is not allowed in modification - del data['attrs']['templates'] - - ret = icinga.modify(name, data) - - if ret['code'] == 200: - changed = True - else: - module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data'])) - - else: - if state == "present": - if module.check_mode: - changed = True - else: - try: - ret = icinga.create(name, data) - if ret['code'] == 200: - changed = True - else: - module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data'])) - except Exception as e: - module.fail_json(msg="exception creating host: " + str(e)) - - module.exit_json(changed=changed, name=name, data=data) - - -# import module snippets -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py b/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py deleted file mode 100644 index 6fcabcf3..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (C) Seth Edwards, 2014 -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: librato_annotation -short_description: create an annotation in librato -description: - - Create an annotation event on the given annotation stream :name. 
If the annotation stream does not exist, it will be created automatically -author: "Seth Edwards (@Sedward)" -requirements: [] -options: - user: - type: str - description: - - Librato account username - required: true - api_key: - type: str - description: - - Librato account api key - required: true - name: - type: str - description: - - The annotation stream name - - If the annotation stream does not exist, it will be created automatically - required: false - title: - type: str - description: - - The title of an annotation is a string and may contain spaces - - The title should be a short, high-level summary of the annotation e.g. v45 Deployment - required: true - source: - type: str - description: - - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population - required: false - description: - type: str - description: - - The description contains extra metadata about a particular annotation - - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! - required: false - start_time: - type: int - description: - - The unix timestamp indicating the time at which the event referenced by this annotation started - required: false - end_time: - type: int - description: - - The unix timestamp indicating the time at which the event referenced by this annotation ended - - For events that have a duration, this is a useful way to annotate the duration of the event - required: false - links: - type: list - elements: dict - description: - - See examples -''' - -EXAMPLES = ''' -- name: Create a simple annotation event with a source - community.general.librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXX - title: App Config Change - source: foo.bar - description: This is a detailed description of the config change - -- name: Create an annotation that includes a link - community.general.librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXXX - name: code.deploy - title: app code deploy - description: this is a detailed description of a deployment - links: - - rel: example - href: http://www.example.com/deploy - -- name: Create an annotation with a start_time and end_time - community.general.librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXXX - name: maintenance - title: Maintenance window - description: This is a detailed description of maintenance - start_time: 1395940006 - end_time: 1395954406 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def post_annotation(module): - user = module.params['user'] - api_key = module.params['api_key'] - name = module.params['name'] - title = module.params['title'] - - url = 'https://metrics-api.librato.com/v1/annotations/%s' % name - params = {} - params['title'] = title - - if module.params['source'] is not None: - params['source'] = module.params['source'] - if module.params['description'] is not None: - params['description'] = module.params['description'] - if module.params['start_time'] is not None: - params['start_time'] = module.params['start_time'] - if module.params['end_time'] is not None: - params['end_time'] = module.params['end_time'] - if module.params['links'] is not None: - params['links'] = module.params['links'] - - json_body = module.jsonify(params) - - headers = {} - headers['Content-Type'] = 'application/json' - - # Hack send parameters the way fetch_url wants them - 
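# (fetch_url() takes no explicit credential arguments: it reads HTTP basic
# auth from module.params['url_username'] and module.params['url_password'],
# which is why the Librato user and api_key are copied into those keys below.)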
module.params['url_username'] = user - module.params['url_password'] = api_key - response, info = fetch_url(module, url, data=json_body, headers=headers) - response_code = str(info['status']) - response_body = info['body'] - if info['status'] != 201: - if info['status'] >= 400: - module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body) - else: - module.fail_json(msg="Request Failed. Response code: " + response_code) - response = response.read() - module.exit_json(changed=True, annotation=response) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - user=dict(required=True), - api_key=dict(required=True, no_log=True), - name=dict(required=False), - title=dict(required=True), - source=dict(required=False), - description=dict(required=False), - start_time=dict(required=False, default=None, type='int'), - end_time=dict(required=False, default=None, type='int'), - links=dict(type='list', elements='dict') - ) - ) - - post_annotation(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/logentries.py b/ansible_collections/community/general/plugins/modules/monitoring/logentries.py deleted file mode 100644 index 07575286..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/logentries.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Ivan Vanderbyl -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: logentries -author: "Ivan Vanderbyl (@ivanvanderbyl)" -short_description: Module for tracking logs via logentries.com -description: - - Sends logs to LogEntries in realtime -options: - path: - type: str - description: - - path to a log file - required: true - state: - type: str - description: - - following state of the log - choices: [ 'present', 'absent', 'followed', 'unfollowed' ] - required: false - default: present - name: - type: str - description: - - name of the log - required: false - logtype: - type: str - description: - - type of the log - required: false - aliases: [type] - -notes: - - Requires the LogEntries agent which can be installed following the instructions at logentries.com -''' -EXAMPLES = ''' -- name: Track nginx logs - community.general.logentries: - path: /var/log/nginx/access.log - state: present - name: nginx-access-log - -- name: Stop tracking nginx logs - community.general.logentries: - path: /var/log/nginx/error.log - state: absent -''' - -from ansible.module_utils.basic import AnsibleModule - - -def query_log_status(module, le_path, path, state="present"): - """ Returns whether a log is followed or not. """ - - if state == "present": - rc, out, err = module.run_command([le_path, "followed", path]) - if rc == 0: - return True - - return False - - -def follow_log(module, le_path, logs, name=None, logtype=None): - """ Follows one or more logs if not already followed. 
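    Each path is first checked with query_log_status() (which shells out to
    'le followed <path>'); paths already being tracked are skipped, and the
    rest are handed to 'le follow', optionally with --name and --type.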
""" - - followed_count = 0 - - for log in logs: - if query_log_status(module, le_path, log): - continue - - if module.check_mode: - module.exit_json(changed=True) - - cmd = [le_path, 'follow', log] - if name: - cmd.extend(['--name', name]) - if logtype: - cmd.extend(['--type', logtype]) - rc, out, err = module.run_command(cmd) - - if not query_log_status(module, le_path, log): - module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) - - followed_count += 1 - - if followed_count > 0: - module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,)) - - module.exit_json(changed=False, msg="logs(s) already followed") - - -def unfollow_log(module, le_path, logs): - """ Unfollows one or more logs if followed. """ - - removed_count = 0 - - # Using a for loop in case of error, we can report the package that failed - for log in logs: - # Query the log first, to see if we even need to remove. - if not query_log_status(module, le_path, log): - continue - - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = module.run_command([le_path, 'rm', log]) - - if query_log_status(module, le_path, log): - module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip())) - - removed_count += 1 - - if removed_count > 0: - module.exit_json(changed=True, msg="removed %d package(s)" % removed_count) - - module.exit_json(changed=False, msg="logs(s) already unfollowed") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(required=True), - state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), - name=dict(required=False, default=None, type='str'), - logtype=dict(required=False, default=None, type='str', aliases=['type']) - ), - supports_check_mode=True - ) - - le_path = module.get_bin_path('le', True, ['/usr/local/bin']) - - p = module.params - - # Handle multiple log files - logs = p["path"].split(",") - logs = filter(None, logs) - - if p["state"] in ["present", "followed"]: - follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) - - elif p["state"] in ["absent", "unfollowed"]: - unfollow_log(module, le_path, logs) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py b/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py deleted file mode 100644 index 13b1233c..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Loic Blot -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: logstash_plugin -short_description: Manage Logstash plugins -description: - - Manages Logstash plugins. -author: Loic Blot (@nerzhul) -options: - name: - type: str - description: - - Install plugin with that name. - required: True - state: - type: str - description: - - Apply plugin state. - choices: ["present", "absent"] - default: present - plugin_bin: - type: path - description: - - Specify logstash-plugin to use for plugin management. - default: /usr/share/logstash/bin/logstash-plugin - proxy_host: - type: str - description: - - Proxy host to use during plugin installation. - proxy_port: - type: str - description: - - Proxy port to use during plugin installation. 
- version: - type: str - description: - - Specify plugin Version of the plugin to install. - If plugin exists with previous version, it will NOT be updated. -''' - -EXAMPLES = ''' -- name: Install Logstash beats input plugin - community.general.logstash_plugin: - state: present - name: logstash-input-beats - -- name: Install specific version of a plugin - community.general.logstash_plugin: - state: present - name: logstash-input-syslog - version: '3.2.0' - -- name: Uninstall Logstash plugin - community.general.logstash_plugin: - state: absent - name: logstash-filter-multiline - -- name: Install Logstash plugin with alternate heap size - community.general.logstash_plugin: - state: present - name: logstash-input-beats - environment: - LS_JAVA_OPTS: "-Xms256m -Xmx256m" -''' - -from ansible.module_utils.basic import AnsibleModule - - -PACKAGE_STATE_MAP = dict( - present="install", - absent="remove" -) - - -def is_plugin_present(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, "list", plugin_name] - rc, out, err = module.run_command(cmd_args) - return rc == 0 - - -def parse_error(string): - reason = "reason: " - try: - return string[string.index(reason) + len(reason):].strip() - except ValueError: - return string - - -def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name] - - if version: - cmd_args.append("--version %s" % version) - - if proxy_host and proxy_port: - cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) - - cmd = " ".join(cmd_args) - - if module.check_mode: - rc, out, err = 0, "check mode", "" - else: - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) - - return True, cmd, out, err - - -def remove_plugin(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name] - - cmd = " ".join(cmd_args) - - if module.check_mode: - rc, out, err = 0, "check mode", "" - else: - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) - - return True, cmd, out, err - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"), - proxy_host=dict(), - proxy_port=dict(), - version=dict() - ), - supports_check_mode=True - ) - - name = module.params["name"] - state = module.params["state"] - plugin_bin = module.params["plugin_bin"] - proxy_host = module.params["proxy_host"] - proxy_port = module.params["proxy_port"] - version = module.params["version"] - - present = is_plugin_present(module, plugin_bin, name) - - # skip if the state is correct - if (present and state == "present") or (state == "absent" and not present): - module.exit_json(changed=False, name=name, state=state) - - if state == "present": - changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port) - elif state == "absent": - changed, cmd, out, err = remove_plugin(module, plugin_bin, name) - - module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/monit.py b/ansible_collections/community/general/plugins/modules/monitoring/monit.py deleted file mode 100644 index 
dfbe9cee..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/monit.py +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Darryl Stoflet -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: monit -short_description: Manage the state of a program monitored via Monit -description: - - Manage the state of a program monitored via I(Monit). -options: - name: - description: - - The name of the I(monit) program/process to manage. - required: true - type: str - state: - description: - - The state of service. - required: true - choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] - type: str - timeout: - description: - - If there are pending actions for the service monitored by monit, then Ansible will check - for up to this many seconds to verify the requested action has been performed. - Ansible will sleep for five seconds between each check. - default: 300 - type: int -author: - - Darryl Stoflet (@dstoflet) - - Simon Kelly (@snopoke) -''' - -EXAMPLES = ''' -- name: Manage the state of program httpd to be in started state - community.general.monit: - name: httpd - state: started -''' - -import time -import re - -from collections import namedtuple - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import python_2_unicode_compatible - - -STATE_COMMAND_MAP = { - 'stopped': 'stop', - 'started': 'start', - 'monitored': 'monitor', - 'unmonitored': 'unmonitor', - 'restarted': 'restart' -} - -MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program', - 'Network'] - - -@python_2_unicode_compatible -class StatusValue(namedtuple("Status", "value, is_pending")): - MISSING = 'missing' - OK = 'ok' - NOT_MONITORED = 'not_monitored' - INITIALIZING = 'initializing' - DOES_NOT_EXIST = 'does_not_exist' - EXECUTION_FAILED = 'execution_failed' - ALL_STATUS = [ - MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED - ] - - def __new__(cls, value, is_pending=False): - return super(StatusValue, cls).__new__(cls, value, is_pending) - - def pending(self): - return StatusValue(self.value, True) - - def __getattr__(self, item): - if item in ('is_%s' % status for status in self.ALL_STATUS): - return self.value == getattr(self, item[3:].upper()) - raise AttributeError(item) - - def __str__(self): - return "%s%s" % (self.value, " (pending)" if self.is_pending else "") - - -class Status(object): - MISSING = StatusValue(StatusValue.MISSING) - OK = StatusValue(StatusValue.OK) - RUNNING = StatusValue(StatusValue.OK) - NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED) - INITIALIZING = StatusValue(StatusValue.INITIALIZING) - DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST) - EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED) - - -class Monit(object): - def __init__(self, module, monit_bin_path, service_name, timeout): - self.module = module - self.monit_bin_path = monit_bin_path - self.process_name = service_name - self.timeout = timeout - - self._monit_version = None - self._raw_version = None - self._status_change_retry_count = 6 - - def monit_version(self): - if self._monit_version is None: - self._raw_version, version = self._get_monit_version() - # Use only major and minor even if there are more these should be enough - 
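# e.g. a reported version of '5.25.1' parses to (5, 25, 1) and is stored as (5, 25)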
self._monit_version = version[0], version[1] - return self._monit_version - - def _get_monit_version(self): - rc, out, err = self.module.run_command([self.monit_bin_path, '-V'], check_rc=True) - version_line = out.split('\n')[0] - raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group() - return raw_version, tuple(map(int, raw_version.split('.'))) - - def exit_fail(self, msg, status=None, **kwargs): - kwargs.update({ - 'msg': msg, - 'monit_version': self._raw_version, - 'process_status': str(status) if status else None, - }) - self.module.fail_json(**kwargs) - - def exit_success(self, state): - self.module.exit_json(changed=True, name=self.process_name, state=state) - - @property - def command_args(self): - return ["-B"] if self.monit_version() > (5, 18) else [] - - def get_status(self, validate=False): - """Return the status of the process in monit. - - :@param validate: Force monit to re-check the status of the process - """ - monit_command = "validate" if validate else "status" - check_rc = False if validate else True # 'validate' always has rc = 1 - command = [self.monit_bin_path, monit_command] + self.command_args + [self.process_name] - rc, out, err = self.module.run_command(command, check_rc=check_rc) - return self._parse_status(out, err) - - def _parse_status(self, output, err): - escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES]) - pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name)) - if not re.search(pattern, output, re.IGNORECASE): - return Status.MISSING - - status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE) - if not status_val: - self.exit_fail("Unable to find process status", stdout=output, stderr=err) - - status_val = status_val[0].strip().upper() - if ' | ' in status_val: - status_val = status_val.split(' | ')[0] - if ' - ' not in status_val: - status_val = status_val.replace(' ', '_') - return getattr(Status, status_val) - else: - status_val, substatus = status_val.split(' - ') - action, state = substatus.split() - if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']: - status = Status.OK - else: - status = Status.NOT_MONITORED - - if state == 'pending': - status = status.pending() - return status - - def is_process_present(self): - command = [self.monit_bin_path, 'summary'] + self.command_args - rc, out, err = self.module.run_command(command, check_rc=True) - return bool(re.findall(r'\b%s\b' % self.process_name, out)) - - def is_process_running(self): - return self.get_status().is_ok - - def run_command(self, command): - """Runs a monit command, and returns the new status.""" - return self.module.run_command([self.monit_bin_path, command, self.process_name], check_rc=True) - - def wait_for_status_change(self, current_status): - running_status = self.get_status() - if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED: - return running_status - - loop_count = 0 - while running_status.value == current_status.value: - if loop_count >= self._status_change_retry_count: - self.exit_fail('waited too long for monit to change state', running_status) - - loop_count += 1 - time.sleep(0.5) - validate = loop_count % 2 == 0 # force recheck of status every second try - running_status = self.get_status(validate) - return running_status - - def wait_for_monit_to_stop_pending(self, current_status=None): - """Fails this run if there is no status or it's pending/initializing for timeout""" - timeout_time = time.time() + self.timeout - - if not 
current_status: - current_status = self.get_status() - waiting_status = [ - StatusValue.MISSING, - StatusValue.INITIALIZING, - StatusValue.DOES_NOT_EXIST, - ] - while current_status.is_pending or (current_status.value in waiting_status): - if time.time() >= timeout_time: - self.exit_fail('waited too long for "pending", or "initiating" status to go away', current_status) - - time.sleep(5) - current_status = self.get_status(validate=True) - return current_status - - def reload(self): - rc, out, err = self.module.run_command([self.monit_bin_path, 'reload']) - if rc != 0: - self.exit_fail('monit reload failed', stdout=out, stderr=err) - self.exit_success(state='reloaded') - - def present(self): - self.run_command('reload') - - timeout_time = time.time() + self.timeout - while not self.is_process_present(): - if time.time() >= timeout_time: - self.exit_fail('waited too long for process to become "present"') - - time.sleep(5) - - self.exit_success(state='present') - - def change_state(self, state, expected_status, invert_expected=None): - current_status = self.get_status() - self.run_command(STATE_COMMAND_MAP[state]) - status = self.wait_for_status_change(current_status) - status = self.wait_for_monit_to_stop_pending(status) - status_match = status.value == expected_status.value - if invert_expected: - status_match = not status_match - if status_match: - self.exit_success(state=state) - self.exit_fail('%s process not %s' % (self.process_name, state), status) - - def stop(self): - self.change_state('stopped', Status.NOT_MONITORED) - - def unmonitor(self): - self.change_state('unmonitored', Status.NOT_MONITORED) - - def restart(self): - self.change_state('restarted', Status.OK) - - def start(self): - self.change_state('started', Status.OK) - - def monitor(self): - self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True) - - -def main(): - arg_spec = dict( - name=dict(required=True), - timeout=dict(default=300, type='int'), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - state = module.params['state'] - timeout = module.params['timeout'] - - monit = Monit(module, module.get_bin_path('monit', True), name, timeout) - - def exit_if_check_mode(): - if module.check_mode: - module.exit_json(changed=True) - - if state == 'reloaded': - exit_if_check_mode() - monit.reload() - - present = monit.is_process_present() - - if not present and not state == 'present': - module.fail_json(msg='%s process not presently configured with monit' % name, name=name) - - if state == 'present': - if present: - module.exit_json(changed=False, name=name, state=state) - exit_if_check_mode() - monit.present() - - monit.wait_for_monit_to_stop_pending() - running = monit.is_process_running() - - if running and state in ['started', 'monitored']: - module.exit_json(changed=False, name=name, state=state) - - if running and state == 'stopped': - exit_if_check_mode() - monit.stop() - - if running and state == 'unmonitored': - exit_if_check_mode() - monit.unmonitor() - - elif state == 'restarted': - exit_if_check_mode() - monit.restart() - - elif not running and state == 'started': - exit_if_check_mode() - monit.start() - - elif not running and state == 'monitored': - exit_if_check_mode() - monit.monitor() - - module.exit_json(changed=False, name=name, state=state) - - -if __name__ == '__main__': - main() diff --git 
a/ansible_collections/community/general/plugins/modules/monitoring/nagios.py b/ansible_collections/community/general/plugins/modules/monitoring/nagios.py deleted file mode 100644 index 248fd105..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/nagios.py +++ /dev/null @@ -1,1304 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# This file is largely copied from the Nagios module included in the -# Func project. Original copyright follows: -# -# func-nagios - Schedule downtime and enables/disable notifications -# Copyright 2011, Red Hat, Inc. -# Tim Bielawa -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: nagios -short_description: Perform common tasks in Nagios related to downtime and notifications. -description: - - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts." - - The C(nagios) module is not idempotent. - - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer - to the host the playbook is currently running on. - - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet). - - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself), - e.g., C(service=host). This keyword may not be given with other services at the same time. - I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.) - To schedule downtime for all services on a particular host use keyword "all", e.g., C(service=all). -options: - action: - description: - - Action to take. - - servicegroup options were added in 2.0. - - delete_downtime options were added in 2.2. - - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0. - required: true - choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", - "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", - "servicegroup_host_downtime", "acknowledge", "forced_check" ] - type: str - host: - description: - - Host to operate on in Nagios. - type: str - cmdfile: - description: - - Path to the nagios I(command file) (FIFO pipe). - Only required if auto-detection fails. - type: str - author: - description: - - Author to leave downtime comments as. - Only usable with the C(downtime) and C(acknowledge) action. - type: str - default: Ansible - comment: - description: - - Comment for C(downtime) and C(acknowledge) action. - type: str - default: Scheduling downtime - start: - description: - - When downtime should start, in time_t format (epoch seconds). - version_added: '0.2.0' - type: str - minutes: - description: - - Minutes to schedule downtime for. - - Only usable with the C(downtime) action. - type: int - default: 30 - services: - description: - - What to manage downtime/alerts for. Separate multiple services with commas. - C(service) is an alias for C(services). - B(Required) option when using the C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), and C(disable_alerts) actions. - aliases: [ "service" ] - type: str - servicegroup: - description: - - The Servicegroup we want to set downtimes/alerts for.
- B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime). - type: str - command: - description: - - The raw command to send to nagios, which - should not include the submitted time header or the line-feed. - B(Required) option when using the C(command) action. - type: str - -author: "Tim Bielawa (@tbielawa)" -''' - -EXAMPLES = ''' -- name: Set 30 minutes of apache downtime - community.general.nagios: - action: downtime - minutes: 30 - service: httpd - host: '{{ inventory_hostname }}' - -- name: Schedule an hour of HOST downtime - community.general.nagios: - action: downtime - minutes: 60 - service: host - host: '{{ inventory_hostname }}' - -- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00 - community.general.nagios: - action: downtime - start: 1555984800 - minutes: 60 - service: host - host: '{{ inventory_hostname }}' - -- name: Schedule an hour of HOST downtime, with a comment describing the reason - community.general.nagios: - action: downtime - minutes: 60 - service: host - host: '{{ inventory_hostname }}' - comment: Rebuilding machine - -- name: Schedule downtime for ALL services on HOST - community.general.nagios: - action: downtime - minutes: 45 - service: all - host: '{{ inventory_hostname }}' - -- name: Schedule downtime for a few services - community.general.nagios: - action: downtime - services: frob,foobar,qeuz - host: '{{ inventory_hostname }}' - -- name: Set 30 minutes downtime for all services in servicegroup foo - community.general.nagios: - action: servicegroup_service_downtime - minutes: 30 - servicegroup: foo - host: '{{ inventory_hostname }}' - -- name: Set 30 minutes downtime for all hosts in servicegroup foo - community.general.nagios: - action: servicegroup_host_downtime - minutes: 30 - servicegroup: foo - host: '{{ inventory_hostname }}' - -- name: Delete all downtime for a given host - community.general.nagios: - action: delete_downtime - host: '{{ inventory_hostname }}' - service: all - -- name: Delete all downtime for HOST with a particular comment - community.general.nagios: - action: delete_downtime - host: '{{ inventory_hostname }}' - service: host - comment: Planned maintenance - -- name: Acknowledge a HOST with a particular comment - community.general.nagios: - action: acknowledge - service: host - host: '{{ inventory_hostname }}' - comment: 'power outage - see casenr 12345' - -- name: Acknowledge an active service problem for the httpd service with a particular comment - community.general.nagios: - action: acknowledge - service: httpd - host: '{{ inventory_hostname }}' - comment: 'service crashed - see casenr 12345' - -- name: Reset a passive service check for snmp trap - community.general.nagios: - action: forced_check - service: snmp - host: '{{ inventory_hostname }}' - -- name: Force an active service check for the httpd service - community.general.nagios: - action: forced_check - service: httpd - host: '{{ inventory_hostname }}' - -- name: Force an active service check for all services of a particular host - community.general.nagios: - action: forced_check - service: all - host: '{{ inventory_hostname }}' - -- name: Force an active service check for a particular host - community.general.nagios: - action: forced_check - service: host - host: '{{ inventory_hostname }}' - -- name: Enable SMART disk alerts - community.general.nagios: - action: enable_alerts - service: smart - host: '{{ inventory_hostname }}' - -- name: Disable httpd and nfs alerts - community.general.nagios: -
action: disable_alerts - service: httpd,nfs - host: '{{ inventory_hostname }}' - -- name: Disable HOST alerts - community.general.nagios: - action: disable_alerts - service: host - host: '{{ inventory_hostname }}' - -- name: Silence ALL alerts - community.general.nagios: - action: silence - host: '{{ inventory_hostname }}' - -- name: Unsilence all alerts - community.general.nagios: - action: unsilence - host: '{{ inventory_hostname }}' - -- name: Shut up nagios - community.general.nagios: - action: silence_nagios - -- name: Annoy me nagios - community.general.nagios: - action: unsilence_nagios - -- name: Command something - community.general.nagios: - action: command - command: DISABLE_FAILURE_PREDICTION -''' - -import time -import os.path -import stat - -from ansible.module_utils.basic import AnsibleModule - - -###################################################################### - -def which_cmdfile(): - locations = [ - # rhel - '/etc/nagios/nagios.cfg', - # debian - '/etc/nagios3/nagios.cfg', - # older debian - '/etc/nagios2/nagios.cfg', - # bsd, solaris - '/usr/local/etc/nagios/nagios.cfg', - # groundwork it monitoring - '/usr/local/groundwork/nagios/etc/nagios.cfg', - # open monitoring distribution - '/omd/sites/oppy/tmp/nagios/nagios.cfg', - # ??? - '/usr/local/nagios/etc/nagios.cfg', - '/usr/local/nagios/nagios.cfg', - '/opt/nagios/etc/nagios.cfg', - '/opt/nagios/nagios.cfg', - # icinga on debian/ubuntu - '/etc/icinga/icinga.cfg', - # icinga installed from source (default location) - '/usr/local/icinga/etc/icinga.cfg', - ] - - for path in locations: - if os.path.exists(path): - for line in open(path): - if line.startswith('command_file'): - return line.split('=')[1].strip() - - return None - -###################################################################### - - -def main(): - ACTION_CHOICES = [ - 'downtime', - 'delete_downtime', - 'silence', - 'unsilence', - 'enable_alerts', - 'disable_alerts', - 'silence_nagios', - 'unsilence_nagios', - 'command', - 'servicegroup_host_downtime', - 'servicegroup_service_downtime', - 'acknowledge', - 'forced_check', - ] - - module = AnsibleModule( - argument_spec=dict( - action=dict(required=True, choices=ACTION_CHOICES), - author=dict(default='Ansible'), - comment=dict(default='Scheduling downtime'), - host=dict(required=False, default=None), - servicegroup=dict(required=False, default=None), - start=dict(required=False, default=None), - minutes=dict(default=30, type='int'), - cmdfile=dict(default=which_cmdfile()), - services=dict(default=None, aliases=['service']), - command=dict(required=False, default=None), - ) - ) - - action = module.params['action'] - host = module.params['host'] - servicegroup = module.params['servicegroup'] - start = module.params['start'] - services = module.params['services'] - cmdfile = module.params['cmdfile'] - command = module.params['command'] - - ################################################################## - # Required args per action: - # downtime = (minutes, service, host) - # acknowledge = (service, host) - # (un)silence = (host) - # (enable/disable)_alerts = (service, host) - # command = command - # - # AnsibleModule will verify most stuff, we need to verify - # 'service' manually.
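# Illustrative summary of the manual checks below (not a new code path):
#   downtime, delete_downtime, acknowledge, forced_check,
#   enable_alerts, disable_alerts                  -> 'services' is required
#   servicegroup_service_downtime,
#   servicegroup_host_downtime                     -> 'servicegroup' is required
#   command                                        -> 'command' is required
#   every action except command, silence_nagios,
#   unsilence_nagios                               -> 'host' is required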
- - ################################################################## - if action not in ['command', 'silence_nagios', 'unsilence_nagios']: - if not host: - module.fail_json(msg='no host specified for action requiring one') - ###################################################################### - if action == 'downtime': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to set downtime for') - - ###################################################################### - if action == 'delete_downtime': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to set downtime for') - - ###################################################################### - - if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']: - # Make sure there's an actual servicegroup selected - if not servicegroup: - module.fail_json(msg='no servicegroup selected to set downtime for') - - ################################################################## - if action in ['enable_alerts', 'disable_alerts']: - if not services: - module.fail_json(msg='a service is required when setting alerts') - - if action in ['command']: - if not command: - module.fail_json(msg='no command passed for command action') - ###################################################################### - if action == 'acknowledge': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to acknowledge') - - ################################################################## - if action == 'forced_check': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to check') - - ################################################################## - if not cmdfile: - module.fail_json(msg='unable to locate nagios.cfg') - - ################################################################## - ansible_nagios = Nagios(module, **module.params) - if module.check_mode: - module.exit_json(changed=True) - else: - ansible_nagios.act() - ################################################################## - - -###################################################################### -class Nagios(object): - """ - Perform common tasks in Nagios related to downtime and - notifications. - - The complete set of external commands Nagios handles is documented - on their website: - - http://old.nagios.org/developerinfo/externalcommands/commandlist.php - - Note that in the case of `schedule_svc_downtime`, - `enable_svc_notifications`, and `disable_svc_notifications`, the - service argument should be passed as a list. 
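    A hypothetical call sequence, for illustration only:

        n = Nagios(module, **module.params)
        n.schedule_svc_downtime('web01', services=['httpd', 'nfs'], minutes=30)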
- """ - - def __init__(self, module, **kwargs): - self.module = module - self.action = kwargs['action'] - self.author = kwargs['author'] - self.comment = kwargs['comment'] - self.host = kwargs['host'] - self.servicegroup = kwargs['servicegroup'] - if kwargs['start'] is not None: - self.start = int(kwargs['start']) - else: - self.start = None - self.minutes = kwargs['minutes'] - self.cmdfile = kwargs['cmdfile'] - self.command = kwargs['command'] - - if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'): - self.services = kwargs['services'] - else: - self.services = kwargs['services'].split(',') - - self.command_results = [] - - def _now(self): - """ - The time in seconds since 12:00:00AM Jan 1, 1970 - """ - - return int(time.time()) - - def _write_command(self, cmd): - """ - Write the given command to the Nagios command file - """ - - if not os.path.exists(self.cmdfile): - self.module.fail_json(msg='nagios command file does not exist', - cmdfile=self.cmdfile) - if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode): - self.module.fail_json(msg='nagios command file is not a fifo file', - cmdfile=self.cmdfile) - try: - fp = open(self.cmdfile, 'w') - fp.write(cmd) - fp.flush() - fp.close() - self.command_results.append(cmd.strip()) - except IOError: - self.module.fail_json(msg='unable to write to nagios command file', - cmdfile=self.cmdfile) - - def _fmt_dt_str(self, cmd, host, duration, author=None, - comment=None, start=None, - svc=None, fixed=1, trigger=0): - """ - Format an external-command downtime string. - - cmd - Nagios command ID - host - Host schedule downtime on - duration - Minutes to schedule downtime for - author - Name to file the downtime as - comment - Reason for running this command (upgrade, reboot, etc) - start - Start of downtime in seconds since 12:00AM Jan 1 1970 - Default is to use the entry time (now) - svc - Service to schedule downtime for, omit when for host downtime - fixed - Start now if 1, start when a problem is detected if 0 - trigger - Optional ID of event to start downtime from. Leave as 0 for - fixed downtime. - - Syntax: [submitted] COMMAND;;[] - ;;;;;; - - """ - - entry_time = self._now() - if start is None: - start = entry_time - - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - duration_s = (duration * 60) - end = start + duration_s - - if not author: - author = self.author - - if not comment: - comment = self.comment - - if svc is not None: - dt_args = [svc, str(start), str(end), str(fixed), str(trigger), - str(duration_s), author, comment] - else: - # Downtime for a host if no svc specified - dt_args = [str(start), str(end), str(fixed), str(trigger), - str(duration_s), author, comment] - - dt_arg_str = ";".join(dt_args) - dt_str = hdr + dt_arg_str + "\n" - - return dt_str - - def _fmt_ack_str(self, cmd, host, author=None, - comment=None, svc=None, sticky=0, notify=1, persistent=0): - """ - Format an external-command acknowledge string. 
- - cmd - Nagios command ID - host - Host to acknowledge the problem on - author - Name to file the acknowledgement as - comment - Reason for running this command (upgrade, reboot, etc) - svc - Service to acknowledge the problem for, omit when acknowledging a host problem - sticky - the acknowledgement will remain until the host returns to an UP state if set to 1 - notify - a notification will be sent out to contacts - persistent - survive across restarts of the Nagios process - - Syntax: [submitted] COMMAND;<host_name>;[<service_description>] - <sticky>;<notify>;<persistent>;<author>;<comment> - """ - - entry_time = self._now() - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - - if not author: - author = self.author - - if not comment: - comment = self.comment - - if svc is not None: - ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment] - else: - # Downtime for a host if no svc specified - ack_args = [str(sticky), str(notify), str(persistent), author, comment] - - ack_arg_str = ";".join(ack_args) - ack_str = hdr + ack_arg_str + "\n" - - return ack_str - - def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None): - """ - Format an external-command downtime deletion string. - - cmd - Nagios command ID - host - Host to remove scheduled downtime from - comment - Reason downtime was added (upgrade, reboot, etc) - start - Start of downtime in seconds since 12:00AM Jan 1 1970 - svc - Service to remove downtime for, omit to remove all downtime for the host - - Syntax: [submitted] COMMAND;<host_name>; - [<service_description>];[<start_time>];[<comment>] - """ - - entry_time = self._now() - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - - if comment is None: - comment = self.comment - - dt_del_args = [] - if svc is not None: - dt_del_args.append(svc) - else: - dt_del_args.append('') - - if start is not None: - dt_del_args.append(str(start)) - else: - dt_del_args.append('') - - if comment is not None: - dt_del_args.append(comment) - else: - dt_del_args.append('') - - dt_del_arg_str = ";".join(dt_del_args) - dt_del_str = hdr + dt_del_arg_str + "\n" - - return dt_del_str - - def _fmt_chk_str(self, cmd, host, svc=None, start=None): - """ - Format an external-command forced host or service check string. - - cmd - Nagios command ID - host - Host to check service from - svc - Service to check - start - check time - - Syntax: [submitted] COMMAND;<host_name>;[<service_description>];<check_time> - """ - - entry_time = self._now() - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - - if start is None: - start = entry_time + 3 - - if svc is None: - chk_args = [str(start)] - else: - chk_args = [svc, str(start)] - - chk_arg_str = ";".join(chk_args) - chk_str = hdr + chk_arg_str + "\n" - - return chk_str - - def _fmt_notif_str(self, cmd, host=None, svc=None): - """ - Format an external-command notification string. - - cmd - Nagios command ID. - host - Host to en/disable notifications on. A value is not required - for global downtime - svc - Service to schedule downtime for. A value is not required - for host downtime. - - Syntax: [submitted] COMMAND;<host_name>[;<service_description>] - """ - - entry_time = self._now() - notif_str = "[%s] %s" % (entry_time, cmd) - if host is not None: - notif_str += ";%s" % host - - if svc is not None: - notif_str += ";%s" % svc - - notif_str += "\n" - - return notif_str - - def schedule_svc_downtime(self, host, services=None, minutes=30, start=None): - """ - This command is used to schedule downtime for a particular - service. - - During the specified downtime, Nagios will not send - notifications out about the service.
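        For instance, 30 minutes of downtime for service 'httpd' on host
        'web01', submitted at epoch 1555984800, is written to the command file
        as (values illustrative):

        [1555984800] SCHEDULE_SVC_DOWNTIME;web01;httpd;1555984800;1555986600;1;0;1800;Ansible;Scheduling downtime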
-
-        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>;
-        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
-        <comment>
-
-        """
-
-        cmd = "SCHEDULE_SVC_DOWNTIME"
-
-        if services is None:
-            services = []
-
-        for service in services:
-            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
-            self._write_command(dt_cmd_str)
-
-    def schedule_host_downtime(self, host, minutes=30, start=None):
-        """
-        This command is used to schedule downtime for a particular
-        host.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the host.
-
-        Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
-        <fixed>;<trigger_id>;<duration>;<author>;<comment>
-        """
-
-        cmd = "SCHEDULE_HOST_DOWNTIME"
-        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
-        self._write_command(dt_cmd_str)
-
-    def acknowledge_svc_problem(self, host, services=None):
-        """
-        This command is used to acknowledge a particular
-        service problem.
-
-        By acknowledging the current problem, future notifications
-        for the same service state are disabled
-
-        Syntax: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
-        <sticky>;<notify>;<persistent>;<author>;<comment>
-        """
-
-        cmd = "ACKNOWLEDGE_SVC_PROBLEM"
-
-        if services is None:
-            services = []
-
-        for service in services:
-            ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service)
-            self._write_command(ack_cmd_str)
-
-    def acknowledge_host_problem(self, host):
-        """
-        This command is used to acknowledge a particular
-        host problem.
-
-        By acknowledging the current problem, future notifications
-        for the same host state are disabled
-
-        Syntax: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;
-        <persistent>;<author>;<comment>
-        """
-
-        cmd = "ACKNOWLEDGE_HOST_PROBLEM"
-        ack_cmd_str = self._fmt_ack_str(cmd, host)
-        self._write_command(ack_cmd_str)
-
-    def schedule_forced_host_check(self, host):
-        """
-        This command schedules a forced active check for a particular host.
-
-        Syntax: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
-        """
-
-        cmd = "SCHEDULE_FORCED_HOST_CHECK"
-
-        chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
-        self._write_command(chk_cmd_str)
-
-    def schedule_forced_host_svc_check(self, host):
-        """
-        This command schedules a forced active check for all services
-        associated with a particular host.
-
-        Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
-        """
-
-        cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS"
-
-        chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
-        self._write_command(chk_cmd_str)
-
-    def schedule_forced_svc_check(self, host, services=None):
-        """
-        This command schedules a forced active check for a particular
-        service.
-
-        Syntax: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
-        """
-
-        cmd = "SCHEDULE_FORCED_SVC_CHECK"
-
-        if services is None:
-            services = []
-
-        for service in services:
-            chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service)
-            self._write_command(chk_cmd_str)
-
-    def schedule_host_svc_downtime(self, host, minutes=30, start=None):
-        """
-        This command is used to schedule downtime for
-        all services associated with a particular host.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the host.
-
-        Syntax: SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;
-        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
-        """
-
-        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
-        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
-        self._write_command(dt_cmd_str)
-
-    def delete_host_downtime(self, host, services=None, comment=None):
-        """
-        This command is used to remove scheduled downtime for a particular
-        host.
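-
-        For example (illustrative timestamp; the empty field is the unused
-        start_time, and the trailing comment falls back to the module
-        default, here assumed to be 'Scheduling downtime'),
-        delete_host_downtime('web01', services=['http']) writes:
-
-        [1658000000] DEL_DOWNTIME_BY_HOST_NAME;web01;http;;Scheduling downtime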
-
-        Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
-        [<service_description>];[<start_time>];[<comment>]
-        """
-
-        cmd = "DEL_DOWNTIME_BY_HOST_NAME"
-
-        if services is None:
-            dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
-            self._write_command(dt_del_cmd_str)
-        else:
-            for service in services:
-                dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
-                self._write_command(dt_del_cmd_str)
-
-    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
-        """
-        This command is used to schedule downtime for all hosts in a
-        particular hostgroup.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the hosts.
-
-        Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;
-        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
-        <comment>
-        """
-
-        cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
-        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
-        self._write_command(dt_cmd_str)
-
-    def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
-        """
-        This command is used to schedule downtime for all services in
-        a particular hostgroup.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the services.
-
-        Note that scheduling downtime for services does not
-        automatically schedule downtime for the hosts those services
-        are associated with.
-
-        Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;
-        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
-        <comment>
-        """
-
-        cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
-        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
-        self._write_command(dt_cmd_str)
-
-    def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
-        """
-        This command is used to schedule downtime for all hosts in a
-        particular servicegroup.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the hosts.
-
-        Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
-        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
-        <comment>
-
-        """
-
-        cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
-        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
-        self._write_command(dt_cmd_str)
-
-    def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
-        """
-        This command is used to schedule downtime for all services in
-        a particular servicegroup.
-
-        During the specified downtime, Nagios will not send
-        notifications out about the services.
-
-        Note that scheduling downtime for services does not
-        automatically schedule downtime for the hosts those services
-        are associated with.
-
-        Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
-        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
-        <comment>
-
-        """
-
-        cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
-        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
-        self._write_command(dt_cmd_str)
-
-    def disable_host_svc_notifications(self, host):
-        """
-        This command is used to prevent notifications from being sent
-        out for all services on the specified host.
-
-        Note that this command does not disable notifications from
-        being sent out about the host.
-
-        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
-        """
-
-        cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, host)
-        self._write_command(notif_str)
-
-    def disable_host_notifications(self, host):
-        """
-        This command is used to prevent notifications from being sent
-        out for the specified host.
-
-        Note that this command does not disable notifications for
-        services associated with this host.
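-
-        For example (illustrative timestamp),
-        disable_host_notifications('web01') writes:
-
-        [1658000000] DISABLE_HOST_NOTIFICATIONS;web01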
-
-        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
-        """
-
-        cmd = "DISABLE_HOST_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, host)
-        self._write_command(notif_str)
-
-    def disable_svc_notifications(self, host, services=None):
-        """
-        This command is used to prevent notifications from being sent
-        out for the specified service.
-
-        Note that this command does not disable notifications from
-        being sent out about the host.
-
-        Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
-        """
-
-        cmd = "DISABLE_SVC_NOTIFICATIONS"
-
-        if services is None:
-            services = []
-
-        for service in services:
-            notif_str = self._fmt_notif_str(cmd, host, svc=service)
-            self._write_command(notif_str)
-
-    def disable_servicegroup_host_notifications(self, servicegroup):
-        """
-        This command is used to prevent notifications from being sent
-        out for all hosts in the specified servicegroup.
-
-        Note that this command does not disable notifications for
-        services associated with hosts in this service group.
-
-        Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
-        """
-
-        cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, servicegroup)
-        self._write_command(notif_str)
-
-    def disable_servicegroup_svc_notifications(self, servicegroup):
-        """
-        This command is used to prevent notifications from being sent
-        out for all services in the specified servicegroup.
-
-        Note that this does not prevent notifications from being sent
-        out about the hosts in this servicegroup.
-
-        Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
-        """
-
-        cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, servicegroup)
-        self._write_command(notif_str)
-
-    def disable_hostgroup_host_notifications(self, hostgroup):
-        """
-        Disables notifications for all hosts in a particular
-        hostgroup.
-
-        Note that this does not disable notifications for the services
-        associated with the hosts in the hostgroup - see the
-        DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
-
-        Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
-        """
-
-        cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, hostgroup)
-        self._write_command(notif_str)
-
-    def disable_hostgroup_svc_notifications(self, hostgroup):
-        """
-        Disables notifications for all services associated with hosts
-        in a particular hostgroup.
-
-        Note that this does not disable notifications for the hosts in
-        the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
-        command for that.
-
-        Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
-        """
-
-        cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, hostgroup)
-        self._write_command(notif_str)
-
-    def enable_host_notifications(self, host):
-        """
-        Enables notifications for a particular host.
-
-        Note that this command does not enable notifications for
-        services associated with this host.
-
-        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
-        """
-
-        cmd = "ENABLE_HOST_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, host)
-        self._write_command(notif_str)
-
-    def enable_host_svc_notifications(self, host):
-        """
-        Enables notifications for all services on the specified host.
-
-        Note that this does not enable notifications for the host.
-
-        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
-        """
-
-        cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, host)
-        nagios_return = self._write_command(notif_str)
-
-        if nagios_return:
-            return notif_str
-        else:
-            return "Fail: could not write to the command file"
-
-    def enable_svc_notifications(self, host, services=None):
-        """
-        Enables notifications for a particular service.
-
-        Note that this does not enable notifications for the host.
-
-        Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
-        """
-
-        cmd = "ENABLE_SVC_NOTIFICATIONS"
-
-        if services is None:
-            services = []
-
-        nagios_return = True
-        return_str_list = []
-        for service in services:
-            notif_str = self._fmt_notif_str(cmd, host, svc=service)
-            nagios_return = self._write_command(notif_str) and nagios_return
-            return_str_list.append(notif_str)
-
-        if nagios_return:
-            return return_str_list
-        else:
-            return "Fail: could not write to the command file"
-
-    def enable_hostgroup_host_notifications(self, hostgroup):
-        """
-        Enables notifications for all hosts in a particular hostgroup.
-
-        Note that this command does not enable notifications for
-        services associated with the hosts in this hostgroup.
-
-        Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
-        """
-
-        cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, hostgroup)
-        nagios_return = self._write_command(notif_str)
-
-        if nagios_return:
-            return notif_str
-        else:
-            return "Fail: could not write to the command file"
-
-    def enable_hostgroup_svc_notifications(self, hostgroup):
-        """
-        Enables notifications for all services that are associated
-        with hosts in a particular hostgroup.
-
-        Note that this does not enable notifications for the hosts in
-        this hostgroup.
-
-        Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
-        """
-
-        cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, hostgroup)
-        nagios_return = self._write_command(notif_str)
-
-        if nagios_return:
-            return notif_str
-        else:
-            return "Fail: could not write to the command file"
-
-    def enable_servicegroup_host_notifications(self, servicegroup):
-        """
-        Enables notifications for all hosts that have services that
-        are members of a particular servicegroup.
-
-        Note that this command does not enable notifications for
-        services associated with the hosts in this servicegroup.
-
-        Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
-        """
-
-        cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, servicegroup)
-        nagios_return = self._write_command(notif_str)
-
-        if nagios_return:
-            return notif_str
-        else:
-            return "Fail: could not write to the command file"
-
-    def enable_servicegroup_svc_notifications(self, servicegroup):
-        """
-        Enables notifications for all services that are members of a
-        particular servicegroup.
-
-        Note that this does not enable notifications for the hosts in
-        this servicegroup.
-
-        Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
-        """
-
-        cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
-        notif_str = self._fmt_notif_str(cmd, servicegroup)
-        nagios_return = self._write_command(notif_str)
-
-        if nagios_return:
-            return notif_str
-        else:
-            return "Fail: could not write to the command file"
-
-    def silence_host(self, host):
-        """
-        This command is used to prevent notifications from being sent
-        out for the host and all services on the specified host.
-
-        This is equivalent to calling disable_host_svc_notifications
-        and disable_host_notifications.
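-
-        For example (illustrative timestamp), silence_host('web01')
-        writes both:
-
-        [1658000000] DISABLE_HOST_SVC_NOTIFICATIONS;web01
-        [1658000000] DISABLE_HOST_NOTIFICATIONS;web01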
-
-        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
-        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
-        """
-
-        cmd = [
-            "DISABLE_HOST_SVC_NOTIFICATIONS",
-            "DISABLE_HOST_NOTIFICATIONS"
-        ]
-        nagios_return = True
-        return_str_list = []
-        for c in cmd:
-            notif_str = self._fmt_notif_str(c, host)
-            nagios_return = self._write_command(notif_str) and nagios_return
-            return_str_list.append(notif_str)
-
-        if nagios_return:
-            return return_str_list
-        else:
-            return "Fail: could not write to the command file"
-
-    def unsilence_host(self, host):
-        """
-        This command is used to enable notifications for the host and
-        all services on the specified host.
-
-        This is equivalent to calling enable_host_svc_notifications
-        and enable_host_notifications.
-
-        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
-        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
-        """
-
-        cmd = [
-            "ENABLE_HOST_SVC_NOTIFICATIONS",
-            "ENABLE_HOST_NOTIFICATIONS"
-        ]
-        nagios_return = True
-        return_str_list = []
-        for c in cmd:
-            notif_str = self._fmt_notif_str(c, host)
-            nagios_return = self._write_command(notif_str) and nagios_return
-            return_str_list.append(notif_str)
-
-        if nagios_return:
-            return return_str_list
-        else:
-            return "Fail: could not write to the command file"
-
-    def silence_nagios(self):
-        """
-        This command is used to disable notifications for all hosts and services
-        in nagios.
-
-        This is a 'SHUT UP, NAGIOS' command
-        """
-        cmd = 'DISABLE_NOTIFICATIONS'
-        self._write_command(self._fmt_notif_str(cmd))
-
-    def unsilence_nagios(self):
-        """
-        This command is used to enable notifications for all hosts and services
-        in nagios.
-
-        This is an 'OK, NAGIOS, GO' command
-        """
-        cmd = 'ENABLE_NOTIFICATIONS'
-        self._write_command(self._fmt_notif_str(cmd))
-
-    def nagios_cmd(self, cmd):
-        """
-        This sends an arbitrary command to nagios.
-
-        It prepends the submitted time and appends a trailing newline.
-
-        You just have to provide the properly formatted command.
-        """
-
-        pre = '[%s]' % int(time.time())
-
-        post = '\n'
-        cmdstr = '%s %s%s' % (pre, cmd, post)
-        self._write_command(cmdstr)
-
-    def act(self):
-        """
-        Figure out what you want to do from ansible, and then do the
-        needful (at the earliest).
-        """
-        # host or service downtime?
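-        # Illustrative dispatch for the downtime action (services comes
-        # straight from the module parameters; 'host' and 'all' are the
-        # special markers handled in __init__, anything else arrives as a
-        # list of service names):
-        #   services='host'  -> schedule_host_downtime(...)
-        #   services='all'   -> schedule_host_svc_downtime(...)
-        #   services='a,b'   -> schedule_svc_downtime(..., services=['a', 'b'])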
- if self.action == 'downtime': - if self.services == 'host': - self.schedule_host_downtime(self.host, minutes=self.minutes, - start=self.start) - elif self.services == 'all': - self.schedule_host_svc_downtime(self.host, minutes=self.minutes, - start=self.start) - else: - self.schedule_svc_downtime(self.host, - services=self.services, - minutes=self.minutes, - start=self.start) - - elif self.action == 'acknowledge': - if self.services == 'host': - self.acknowledge_host_problem(self.host) - else: - self.acknowledge_svc_problem(self.host, services=self.services) - - elif self.action == 'delete_downtime': - if self.services == 'host': - self.delete_host_downtime(self.host) - elif self.services == 'all': - self.delete_host_downtime(self.host, comment='') - else: - self.delete_host_downtime(self.host, services=self.services) - - elif self.action == 'forced_check': - if self.services == 'host': - self.schedule_forced_host_check(self.host) - elif self.services == 'all': - self.schedule_forced_host_svc_check(self.host) - else: - self.schedule_forced_svc_check(self.host, services=self.services) - - elif self.action == "servicegroup_host_downtime": - if self.servicegroup: - self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) - elif self.action == "servicegroup_service_downtime": - if self.servicegroup: - self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) - - # toggle the host AND service alerts - elif self.action == 'silence': - self.silence_host(self.host) - - elif self.action == 'unsilence': - self.unsilence_host(self.host) - - # toggle host/svc alerts - elif self.action == 'enable_alerts': - if self.services == 'host': - self.enable_host_notifications(self.host) - elif self.services == 'all': - self.enable_host_svc_notifications(self.host) - else: - self.enable_svc_notifications(self.host, - services=self.services) - - elif self.action == 'disable_alerts': - if self.services == 'host': - self.disable_host_notifications(self.host) - elif self.services == 'all': - self.disable_host_svc_notifications(self.host) - else: - self.disable_svc_notifications(self.host, - services=self.services) - elif self.action == 'silence_nagios': - self.silence_nagios() - - elif self.action == 'unsilence_nagios': - self.unsilence_nagios() - - elif self.action == 'command': - self.nagios_cmd(self.command) - - # wtf? 
- else: - self.module.fail_json(msg="unknown action specified: '%s'" % - self.action) - - self.module.exit_json(nagios_commands=self.command_results, - changed=True) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py b/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py deleted file mode 100644 index af953e0a..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: newrelic_deployment -author: "Matt Coddington (@mcodd)" -short_description: Notify newrelic about app deployments -description: - - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api) -options: - token: - type: str - description: - - API token, to place in the x-api-key header. - required: true - app_name: - type: str - description: - - (one of app_name or application_id are required) The value of app_name in the newrelic.yml file used by the application - required: false - application_id: - type: str - description: - - (one of app_name or application_id are required) The application id, found in the URL when viewing the application in RPM - required: false - changelog: - type: str - description: - - A list of changes for this deployment - required: false - description: - type: str - description: - - Text annotation for the deployment - notes for you - required: false - revision: - type: str - description: - - A revision number (e.g., git commit SHA) - required: false - user: - type: str - description: - - The name of the user/process that triggered this deployment - required: false - appname: - type: str - description: - - Name of the application - required: false - environment: - type: str - description: - - The environment for this deployment - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - type: bool - -requirements: [] -''' - -EXAMPLES = ''' -- name: Notify newrelic about an app deployment - community.general.newrelic_deployment: - token: AAAAAA - app_name: myapp - user: ansible deployment - revision: '1.0' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode - -# =========================================== -# Module execution. 
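-# For reference, the module reduces to one authenticated POST. A minimal
-# standalone sketch of the equivalent request (placeholder token and
-# values, Python 3 standard library only):
-#
-#   import urllib.request
-#   from urllib.parse import urlencode
-#
-#   data = urlencode({'app_name': 'myapp', 'revision': '1.0'}).encode()
-#   req = urllib.request.Request(
-#       'https://rpm.newrelic.com/deployments.xml',
-#       data=data, headers={'x-api-key': 'AAAAAA'})
-#   urllib.request.urlopen(req)  # 200/201 on success, mirroring the
-#                                # fetch_url() check in main() below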
-# - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - app_name=dict(required=False), - application_id=dict(required=False), - changelog=dict(required=False), - description=dict(required=False), - revision=dict(required=False), - user=dict(required=False), - appname=dict(required=False), - environment=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - required_one_of=[['app_name', 'application_id']], - supports_check_mode=True - ) - - # build list of params - params = {} - if module.params["app_name"] and module.params["application_id"]: - module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") - - if module.params["app_name"]: - params["app_name"] = module.params["app_name"] - elif module.params["application_id"]: - params["application_id"] = module.params["application_id"] - else: - module.fail_json(msg="you must set one of 'app_name' or 'application_id'") - - for item in ["changelog", "description", "revision", "user", "appname", "environment"]: - if module.params[item]: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # Send the data to NewRelic - url = "https://rpm.newrelic.com/deployments.xml" - data = urlencode(params) - headers = { - 'x-api-key': module.params["token"], - } - response, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] in (200, 201): - module.exit_json(changed=True) - else: - module.fail_json(msg="unable to update newrelic: %s" % info['msg']) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py b/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py deleted file mode 100644 index dba931ab..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py +++ /dev/null @@ -1,280 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: pagerduty -short_description: Create PagerDuty maintenance windows -description: - - This module will let you create PagerDuty maintenance windows -author: - - "Andrew Newdigate (@suprememoocow)" - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" - - "Bruce Pennypacker (@bpennypacker)" -requirements: - - PagerDuty API access -options: - state: - type: str - description: - - Create a maintenance window or get a list of ongoing windows. - required: true - choices: [ "running", "started", "ongoing", "absent" ] - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - user: - type: str - description: - - PagerDuty user ID. Obsolete. Please, use I(token) for authorization. - token: - type: str - description: - - A pagerduty token, generated on the pagerduty site. It is used for authorization. - required: true - requester_id: - type: str - description: - - ID of user making the request. Only needed when creating a maintenance_window. - service: - type: list - elements: str - description: - - A comma separated list of PagerDuty service IDs. - aliases: [ services ] - window_id: - type: str - description: - - ID of maintenance window. 
Only needed when absent a maintenance_window. - hours: - type: str - description: - - Length of maintenance window in hours. - default: '1' - minutes: - type: str - description: - - Maintenance window in minutes (this is added to the hours). - default: '0' - desc: - type: str - description: - - Short description of maintenance window. - default: Created by Ansible - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -''' - -EXAMPLES = ''' -- name: List ongoing maintenance windows using a token - community.general.pagerduty: - name: companyabc - token: xxxxxxxxxxxxxx - state: ongoing - -- name: Create a 1 hour maintenance window for service FOO123 - community.general.pagerduty: - name: companyabc - user: example@example.com - token: yourtoken - state: running - service: FOO123 - -- name: Create a 5 minute maintenance window for service FOO123 - community.general.pagerduty: - name: companyabc - token: xxxxxxxxxxxxxx - hours: 0 - minutes: 5 - state: running - service: FOO123 - - -- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment" - community.general.pagerduty: - name: companyabc - user: example@example.com - state: running - service: FOO123 - hours: 4 - desc: deployment - register: pd_window - -- name: Delete the previous maintenance window - community.general.pagerduty: - name: companyabc - user: example@example.com - state: absent - window_id: '{{ pd_window.result.maintenance_window.id }}' - -# Delete a maintenance window from a separate playbook than its creation, -# and if it is the only existing maintenance window -- name: Check - community.general.pagerduty: - requester_id: XXXXXXX - token: yourtoken - state: ongoing - register: pd_window - -- name: Delete - community.general.pagerduty: - requester_id: XXXXXXX - token: yourtoken - state: absent - window_id: "{{ pd_window.result.maintenance_windows[0].id }}" -''' - -import datetime -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -class PagerDutyRequest(object): - def __init__(self, module, name, user, token): - self.module = module - self.name = name - self.user = user - self.token = token - self.headers = { - 'Content-Type': 'application/json', - "Authorization": self._auth_header(), - 'Accept': 'application/vnd.pagerduty+json;version=2' - } - - def ongoing(self, http_call=fetch_url): - url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing" - headers = dict(self.headers) - - response, info = http_call(self.module, url, headers=headers) - if info['status'] != 200: - self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) - - json_out = self._read_response(response) - - return False, json_out, False - - def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url): - if not requester_id: - self.module.fail_json(msg="requester_id is required when maintenance window should be created") - - url = 'https://api.pagerduty.com/maintenance_windows' - - headers = dict(self.headers) - headers.update({'From': requester_id}) - - start, end = self._compute_start_end_time(hours, minutes) - services = self._create_services_payload(service) - - request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}} - - data = json.dumps(request_data) - response, info = 
http_call(self.module, url, data=data, headers=headers, method='POST') - if info['status'] != 201: - self.module.fail_json(msg="failed to create the window: %s" % info['msg']) - - json_out = self._read_response(response) - - return False, json_out, True - - def _create_services_payload(self, service): - if (isinstance(service, list)): - return [{'id': s, 'type': 'service_reference'} for s in service] - else: - return [{'id': service, 'type': 'service_reference'}] - - def _compute_start_end_time(self, hours, minutes): - now = datetime.datetime.utcnow() - later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes)) - start = now.strftime("%Y-%m-%dT%H:%M:%SZ") - end = later.strftime("%Y-%m-%dT%H:%M:%SZ") - return start, end - - def absent(self, window_id, http_call=fetch_url): - url = "https://api.pagerduty.com/maintenance_windows/" + window_id - headers = dict(self.headers) - - response, info = http_call(self.module, url, headers=headers, method='DELETE') - if info['status'] != 204: - self.module.fail_json(msg="failed to delete the window: %s" % info['msg']) - - json_out = self._read_response(response) - - return False, json_out, True - - def _auth_header(self): - return "Token token=%s" % self.token - - def _read_response(self, response): - try: - return json.loads(response.read()) - except Exception: - return "" - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), - name=dict(required=False), - user=dict(required=False), - token=dict(required=True, no_log=True), - service=dict(required=False, type='list', elements='str', aliases=["services"]), - window_id=dict(required=False), - requester_id=dict(required=False), - hours=dict(default='1', required=False), # @TODO change to int? - minutes=dict(default='0', required=False), # @TODO change to int? 
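-            # Both arrive as strings; _compute_start_end_time() int()-converts
-            # them, so e.g. hours='1', minutes='30' yields a 90-minute window.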
- desc=dict(default='Created by Ansible', required=False), - validate_certs=dict(default=True, type='bool'), - ) - ) - - state = module.params['state'] - name = module.params['name'] - user = module.params['user'] - service = module.params['service'] - window_id = module.params['window_id'] - hours = module.params['hours'] - minutes = module.params['minutes'] - token = module.params['token'] - desc = module.params['desc'] - requester_id = module.params['requester_id'] - - pd = PagerDutyRequest(module, name, user, token) - - if state == "running" or state == "started": - if not service: - module.fail_json(msg="service not specified") - (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc) - if rc == 0: - changed = True - - if state == "ongoing": - (rc, out, changed) = pd.ongoing() - - if state == "absent": - (rc, out, changed) = pd.absent(window_id) - - if rc != 0: - module.fail_json(msg="failed", result=out) - - module.exit_json(msg="success", result=out, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py b/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py deleted file mode 100644 index 58a1f260..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: pagerduty_alert -short_description: Trigger, acknowledge or resolve PagerDuty incidents -description: - - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events -author: - - "Amanpreet Singh (@ApsOps)" -requirements: - - PagerDuty API access -options: - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - service_id: - type: str - description: - - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. - required: true - service_key: - type: str - description: - - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key). - integration_key: - type: str - description: - - The GUID of one of your "Generic API" services. - - This is the "integration key" listed on a "Integrations" tab of PagerDuty service. - state: - type: str - description: - - Type of event to be sent. - required: true - choices: - - 'triggered' - - 'acknowledged' - - 'resolved' - api_key: - type: str - description: - - The pagerduty API key (readonly access), generated on the pagerduty site. - required: true - desc: - type: str - description: - - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) - will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. - The maximum length is 1024 characters. - - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event. - required: false - default: Created via Ansible - incident_key: - type: str - description: - - Identifies the incident to which this I(state) should be applied. - - For C(triggered) I(state) - If there's no open (i.e. 
unresolved) incident with this key, a new one will be created. If there's already an - open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" - problem reports. - - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a - trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. - required: false - client: - type: str - description: - - The name of the monitoring client that is triggering this event. - required: false - client_url: - type: str - description: - - The URL of the monitoring client that is triggering this event. - required: false -''' - -EXAMPLES = ''' -- name: Trigger an incident with just the basic options - community.general.pagerduty_alert: - name: companyabc - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: triggered - desc: problem that led to this trigger - -- name: Trigger an incident with more options - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: triggered - desc: problem that led to this trigger - incident_key: somekey - client: Sample Monitoring Service - client_url: http://service.example.com - -- name: Acknowledge an incident based on incident_key - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: acknowledged - incident_key: somekey - desc: "some text for incident's log" - -- name: Resolve an incident based on incident_key - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: resolved - incident_key: somekey - desc: "some text for incident's log" -''' -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse - - -def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url): - url = 'https://api.pagerduty.com/incidents' - headers = { - "Content-type": "application/json", - "Authorization": "Token token=%s" % api_key, - 'Accept': 'application/vnd.pagerduty+json;version=2' - } - - params = { - 'service_ids[]': service_id, - 'sort_by': 'incident_number:desc', - 'time_zone': 'UTC' - } - if incident_key: - params['incident_key'] = incident_key - - url_parts = list(urlparse(url)) - url_parts[4] = urlencode(params, True) - - url = urlunparse(url_parts) - - response, info = http_call(module, url, method='get', headers=headers) - - if info['status'] != 200: - module.fail_json(msg="failed to check current incident status." 
- "Reason: %s" % info['msg']) - - incidents = json.loads(response.read())["incidents"] - msg = "No corresponding incident" - - if len(incidents) == 0: - if state in ('acknowledged', 'resolved'): - return msg, False - return msg, True - elif state != incidents[0]["status"]: - return incidents[0], True - - return incidents[0], False - - -def send_event(module, service_key, event_type, desc, - incident_key=None, client=None, client_url=None): - url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" - headers = { - "Content-type": "application/json" - } - - data = { - "service_key": service_key, - "event_type": event_type, - "incident_key": incident_key, - "description": desc, - "client": client, - "client_url": client_url - } - - response, info = fetch_url(module, url, method='post', - headers=headers, data=json.dumps(data)) - if info['status'] != 200: - module.fail_json(msg="failed to %s. Reason: %s" % - (event_type, info['msg'])) - json_out = json.loads(response.read()) - return json_out - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=False), - service_id=dict(required=True), - service_key=dict(required=False, no_log=True), - integration_key=dict(required=False, no_log=True), - api_key=dict(required=True, no_log=True), - state=dict(required=True, - choices=['triggered', 'acknowledged', 'resolved']), - client=dict(required=False, default=None), - client_url=dict(required=False, default=None), - desc=dict(required=False, default='Created via Ansible'), - incident_key=dict(required=False, default=None, no_log=False) - ), - supports_check_mode=True - ) - - name = module.params['name'] - service_id = module.params['service_id'] - integration_key = module.params['integration_key'] - service_key = module.params['service_key'] - api_key = module.params['api_key'] - state = module.params['state'] - client = module.params['client'] - client_url = module.params['client_url'] - desc = module.params['desc'] - incident_key = module.params['incident_key'] - - if integration_key is None: - if service_key is not None: - integration_key = service_key - module.warn('"service_key" is obsolete parameter and will be removed.' 
- ' Please, use "integration_key" instead') - else: - module.fail_json(msg="'integration_key' is required parameter") - - state_event_dict = { - 'triggered': 'trigger', - 'acknowledged': 'acknowledge', - 'resolved': 'resolve' - } - - event_type = state_event_dict[state] - - if event_type != 'trigger' and incident_key is None: - module.fail_json(msg="incident_key is required for " - "acknowledge or resolve events") - - out, changed = check(module, name, state, service_id, - integration_key, api_key, incident_key) - - if not module.check_mode and changed is True: - out = send_event(module, integration_key, event_type, desc, - incident_key, client, client_url) - - module.exit_json(result=out, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py b/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py deleted file mode 100644 index 358a6961..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: pagerduty_change -short_description: Track a code or infrastructure change as a PagerDuty change event -version_added: 1.3.0 -description: - - This module will let you create a PagerDuty change event each time the module is run. - - This is not an idempotent action and a new change event will be created each time it is run. -author: - - Adam Vaughan (@adamvaughan) -requirements: - - PagerDuty integration key -options: - integration_key: - description: - - The integration key that identifies the service the change was made to. - This can be found by adding an integration to a service in PagerDuty. - required: true - type: str - summary: - description: - - A short description of the change that occurred. - required: true - type: str - source: - description: - - The source of the change event. - default: Ansible - type: str - user: - description: - - The name of the user or process that triggered this deployment. - type: str - repo: - description: - - The URL of the project repository. - required: false - type: str - revision: - description: - - An identifier of the revision being deployed, typically a number or SHA from a version control system. - required: false - type: str - environment: - description: - - The environment name, typically C(production), C(staging), etc. - required: false - type: str - link_url: - description: - - A URL where more information about the deployment can be obtained. - required: false - type: str - link_text: - description: - - Descriptive text for a URL where more information about the deployment can be obtained. - required: false - type: str - url: - description: - - URL to submit the change event to. - required: false - default: https://events.pagerduty.com/v2/change/enqueue - type: str - validate_certs: - description: - - If C(no), SSL certificates for the target URL will not be validated. - This should only be used on personally controlled sites using self-signed certificates. - required: false - default: yes - type: bool -notes: - - Supports C(check_mode). 
Note that check mode simply does nothing except returning C(changed=true) in case the I(url) seems to be correct. -''' - -EXAMPLES = ''' -- name: Track the deployment as a PagerDuty change event - community.general.pagerduty_change: - integration_key: abc123abc123abc123abc123abc123ab - summary: The application was deployed - -- name: Track the deployment as a PagerDuty change event with more details - community.general.pagerduty_change: - integration_key: abc123abc123abc123abc123abc123ab - summary: The application was deployed - source: Ansible Deploy - user: ansible - repo: github.com/ansible/ansible - revision: '4.2' - environment: production - link_url: https://github.com/ansible-collections/community.general/pull/1269 - link_text: View changes on GitHub -''' - -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.basic import AnsibleModule -from datetime import datetime - - -def main(): - module = AnsibleModule( - argument_spec=dict( - integration_key=dict(required=True, type='str', no_log=True), - summary=dict(required=True, type='str'), - source=dict(required=False, default='Ansible', type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - environment=dict(required=False, type='str'), - link_url=dict(required=False, type='str'), - link_text=dict(required=False, type='str'), - url=dict(required=False, - default='https://events.pagerduty.com/v2/change/enqueue', type='str'), - validate_certs=dict(default=True, type='bool') - ), - supports_check_mode=True - ) - - # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/ - - url = module.params['url'] - headers = {'Content-Type': 'application/json'} - - if module.check_mode: - _response, info = fetch_url( - module, url, headers=headers, method='POST') - - if info['status'] == 400: - module.exit_json(changed=True) - else: - module.fail_json( - msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status'])) - - custom_details = {} - - if module.params['user']: - custom_details['user'] = module.params['user'] - - if module.params['repo']: - custom_details['repo'] = module.params['repo'] - - if module.params['revision']: - custom_details['revision'] = module.params['revision'] - - if module.params['environment']: - custom_details['environment'] = module.params['environment'] - - now = datetime.utcnow() - timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - - payload = { - 'summary': module.params['summary'], - 'source': module.params['source'], - 'timestamp': timestamp, - 'custom_details': custom_details - } - - event = { - 'routing_key': module.params['integration_key'], - 'payload': payload - } - - if module.params['link_url']: - link = { - 'href': module.params['link_url'] - } - - if module.params['link_text']: - link['text'] = module.params['link_text'] - - event['links'] = [link] - - _response, info = fetch_url( - module, url, data=module.jsonify(event), headers=headers, method='POST') - - if info['status'] == 202: - module.exit_json(changed=True) - else: - module.fail_json( - msg='Creating PagerDuty change event failed with %d' % (info['status'])) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py b/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py deleted file mode 100644 index 4b20a321..00000000 --- 
a/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Zainab Alsaffar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: pagerduty_user -short_description: Manage a user account on PagerDuty -description: - - This module manages the creation/removal of a user account on PagerDuty. -version_added: '1.3.0' -author: Zainab Alsaffar (@zanssa) -requirements: - - pdpyras python module = 4.1.1 - - PagerDuty API Access -options: - access_token: - description: - - An API access token to authenticate with the PagerDuty REST API. - required: true - type: str - pd_user: - description: - - Name of the user in PagerDuty. - required: true - type: str - pd_email: - description: - - The user's email address. - - I(pd_email) is the unique identifier used and cannot be updated using this module. - required: true - type: str - pd_role: - description: - - The user's role. - choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] - default: 'responder' - type: str - state: - description: - - State of the user. - - On C(present), it creates a user if the user doesn't exist. - - On C(absent), it removes a user if the account exists. - choices: ['present', 'absent'] - default: 'present' - type: str - pd_teams: - description: - - The teams to which the user belongs. - - Required if I(state=present). - type: list - elements: str -notes: - - Supports C(check_mode). -''' - -EXAMPLES = r''' -- name: Create a user account on PagerDuty - community.general.pagerduty_user: - access_token: 'Your_Access_token' - pd_user: user_full_name - pd_email: user_email - pd_role: user_pd_role - pd_teams: user_pd_teams - state: "present" - -- name: Remove a user account from PagerDuty - community.general.pagerduty_user: - access_token: 'Your_Access_token' - pd_user: user_full_name - pd_email: user_email - state: "absent" -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import traceback -from os import path - -try: - from pdpyras import APISession - HAS_PD_PY = True -except ImportError: - HAS_PD_PY = False - PD_IMPORT_ERR = traceback.format_exc() - -try: - from pdpyras import PDClientError - HAS_PD_CLIENT_ERR = True -except ImportError: - HAS_PD_CLIENT_ERR = False - PD_CLIENT_ERR_IMPORT_ERR = traceback.format_exc() - - -class PagerDutyUser(object): - def __init__(self, module, session): - self._module = module - self._apisession = session - - # check if the user exists - def does_user_exist(self, pd_email): - for user in self._apisession.iter_all('users'): - if user['email'] == pd_email: - return user['id'] - - # create a user account on PD - def add_pd_user(self, pd_name, pd_email, pd_role): - try: - user = self._apisession.persist('users', 'email', { - "name": pd_name, - "email": pd_email, - "type": "user", - "role": pd_role, - }) - return user - - except PDClientError as e: - if e.response.status_code == 400: - self._module.fail_json( - msg="Failed to add %s due to invalid argument" % (pd_name)) - if e.response.status_code == 401: - self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name)) - if e.response.status_code == 402: - self._module.fail_json( - msg="Failed to add %s due to inability 
to perform the action within the API token" % (pd_name)) - if e.response.status_code == 403: - self._module.fail_json( - msg="Failed to add %s due to inability to review the requested resource within the API token" % (pd_name)) - if e.response.status_code == 429: - self._module.fail_json( - msg="Failed to add %s due to reaching the limit of making requests" % (pd_name)) - - # delete a user account from PD - def delete_user(self, pd_user_id, pd_name): - try: - user_path = path.join('/users/', pd_user_id) - self._apisession.rdelete(user_path) - - except PDClientError as e: - if e.response.status_code == 404: - self._module.fail_json( - msg="Failed to remove %s as user was not found" % (pd_name)) - if e.response.status_code == 403: - self._module.fail_json( - msg="Failed to remove %s due to inability to review the requested resource within the API token" % (pd_name)) - if e.response.status_code == 401: - # print out the list of incidents - pd_incidents = self.get_incidents_assigned_to_user(pd_user_id) - self._module.fail_json(msg="Failed to remove %s as user has assigned incidents %s" % (pd_name, pd_incidents)) - if e.response.status_code == 429: - self._module.fail_json( - msg="Failed to remove %s due to reaching the limit of making requests" % (pd_name)) - - # get incidents assigned to a user - def get_incidents_assigned_to_user(self, pd_user_id): - incident_info = {} - incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]}) - - for incident in incidents: - incident_info = { - 'title': incident['title'], - 'key': incident['incident_key'], - 'status': incident['status'] - } - return incident_info - - # add a user to a team/teams - def add_user_to_teams(self, pd_user_id, pd_teams, pd_role): - updated_team = None - for team in pd_teams: - team_info = self._apisession.find('teams', team, attribute='name') - if team_info is not None: - try: - updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={ - 'role': pd_role - }) - except PDClientError: - updated_team = None - return updated_team - - -def main(): - module = AnsibleModule( - argument_spec=dict( - access_token=dict(type='str', required=True, no_log=True), - pd_user=dict(type='str', required=True), - pd_email=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - pd_role=dict(type='str', default='responder', - choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']), - pd_teams=dict(type='list', elements='str', required=False)), - required_if=[['state', 'present', ['pd_teams']], ], - supports_check_mode=True, - ) - - if not HAS_PD_PY: - module.fail_json(msg=missing_required_lib('pdpyras', url='https://github.com/PagerDuty/pdpyras'), exception=PD_IMPORT_ERR) - - if not HAS_PD_CLIENT_ERR: - module.fail_json(msg=missing_required_lib('PDClientError', url='https://github.com/PagerDuty/pdpyras'), exception=PD_CLIENT_ERR_IMPORT_ERR) - - access_token = module.params['access_token'] - pd_user = module.params['pd_user'] - pd_email = module.params['pd_email'] - state = module.params['state'] - pd_role = module.params['pd_role'] - pd_teams = module.params['pd_teams'] - - if pd_role: - pd_role_gui_value = { - 'global_admin': 'admin', - 'manager': 'user', - 'responder': 'limited_user', - 'observer': 'observer', - 'stakeholder': 'read_only_user', - 'limited_stakeholder': 'read_only_limited_user', - 'restricted_access': 'restricted_access' - } - pd_role = 
pd_role_gui_value[pd_role] - - # authenticate with PD API - try: - session = APISession(access_token) - except PDClientError as e: - module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e) - - user = PagerDutyUser(module, session) - - user_exists = user.does_user_exist(pd_email) - - if user_exists: - if state == "absent": - # remove user - if not module.check_mode: - user.delete_user(user_exists, pd_user) - module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user) - else: - module.exit_json(changed=False, result="User %s already exists." % pd_user) - - # in case that the user does not exist - else: - if state == "absent": - module.exit_json(changed=False, result="User %s was not found." % pd_user) - - else: - # add user, adds user with the default notification rule and contact info (email) - if not module.check_mode: - user.add_pd_user(pd_user, pd_email, pd_role) - # get user's id - pd_user_id = user.does_user_exist(pd_email) - # add a user to the team/s - user.add_user_to_teams(pd_user_id, pd_teams, pd_role) - module.exit_json(changed=True, result="Successfully created & added user %s to team %s" % (pd_user, pd_teams)) - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py b/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py deleted file mode 100644 index 23ed2545..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: pingdom -short_description: Pause/unpause Pingdom alerts -description: - - This module will let you pause/unpause Pingdom alerts -author: - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" -requirements: - - "This pingdom python library: https://github.com/mbabineau/pingdom-python" -options: - state: - type: str - description: - - Define whether or not the check should be running or paused. - required: true - choices: [ "running", "paused", "started", "stopped" ] - checkid: - type: str - description: - - Pingdom ID of the check. - required: true - uid: - type: str - description: - - Pingdom user ID. - required: true - passwd: - type: str - description: - - Pingdom user password. - required: true - key: - type: str - description: - - Pingdom API key. - required: true -notes: - - This module does not yet have support to add/remove checks. 
-''' - -EXAMPLES = ''' -- name: Pause the check with the ID of 12345 - community.general.pingdom: - uid: example@example.com - passwd: password123 - key: apipassword123 - checkid: 12345 - state: paused - -- name: Unpause the check with the ID of 12345 - community.general.pingdom: - uid: example@example.com - passwd: password123 - key: apipassword123 - checkid: 12345 - state: running -''' - -import traceback - -PINGDOM_IMP_ERR = None -try: - import pingdom - HAS_PINGDOM = True -except Exception: - PINGDOM_IMP_ERR = traceback.format_exc() - HAS_PINGDOM = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def pause(checkid, uid, passwd, key): - - c = pingdom.PingdomConnection(uid, passwd, key) - c.modify_check(checkid, paused=True) - check = c.get_check(checkid) - name = check.name - result = check.status - # if result != "paused": # api output buggy - accept raw exception for now - # return (True, name, result) - return (False, name, result) - - -def unpause(checkid, uid, passwd, key): - - c = pingdom.PingdomConnection(uid, passwd, key) - c.modify_check(checkid, paused=False) - check = c.get_check(checkid) - name = check.name - result = check.status - # if result != "up": # api output buggy - accept raw exception for now - # return (True, name, result) - return (False, name, result) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']), - checkid=dict(required=True), - uid=dict(required=True), - passwd=dict(required=True, no_log=True), - key=dict(required=True, no_log=True), - ) - ) - - if not HAS_PINGDOM: - module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR) - - checkid = module.params['checkid'] - state = module.params['state'] - uid = module.params['uid'] - passwd = module.params['passwd'] - key = module.params['key'] - - if (state == "paused" or state == "stopped"): - (rc, name, result) = pause(checkid, uid, passwd, key) - - if (state == "running" or state == "started"): - (rc, name, result) = unpause(checkid, uid, passwd, key) - - if rc != 0: - module.fail_json(checkid=checkid, name=name, status=result) - - module.exit_json(checkid=checkid, name=name, status=result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py b/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py deleted file mode 100644 index cea3bfdf..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2014, Max Riveiro, -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rollbar_deployment -author: "Max Riveiro (@kavu)" -short_description: Notify Rollbar about app deployments -description: - - Notify Rollbar about app deployments - (see https://rollbar.com/docs/deploys_other/) -options: - token: - type: str - description: - - Your project access token. - required: true - environment: - type: str - description: - - Name of the environment being deployed, e.g. 'production'. - required: true - revision: - type: str - description: - - Revision number/sha being deployed. - required: true - user: - type: str - description: - - User who deployed. 
- required: false - rollbar_user: - type: str - description: - - Rollbar username of the user who deployed. - required: false - comment: - type: str - description: - - Deploy comment (e.g. what is being deployed). - required: false - url: - type: str - description: - - Optional URL to submit the notification to. - required: false - default: 'https://api.rollbar.com/api/1/deploy/' - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. - This should only be used on personally controlled sites using - self-signed certificates. - required: false - default: 'yes' - type: bool -''' - -EXAMPLES = ''' - - name: Rollbar deployment notification - community.general.rollbar_deployment: - token: AAAAAA - environment: staging - user: ansible - revision: '4.2' - rollbar_user: admin - comment: Test Deploy - - - name: Notify rollbar about current git revision deployment by current user - community.general.rollbar_deployment: - token: "{{ rollbar_access_token }}" - environment: production - revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" - user: "{{ lookup('env', 'USER') }}" -''' -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - environment=dict(required=True), - revision=dict(required=True), - user=dict(required=False), - rollbar_user=dict(required=False), - comment=dict(required=False), - url=dict( - required=False, - default='https://api.rollbar.com/api/1/deploy/' - ), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - if module.check_mode: - module.exit_json(changed=True) - - params = dict( - access_token=module.params['token'], - environment=module.params['environment'], - revision=module.params['revision'] - ) - - if module.params['user']: - params['local_username'] = module.params['user'] - - if module.params['rollbar_user']: - params['rollbar_username'] = module.params['rollbar_user'] - - if module.params['comment']: - params['comment'] = module.params['comment'] - - url = module.params.get('url') - - try: - data = urlencode(params) - response, info = fetch_url(module, url, data=data, method='POST') - except Exception as e: - module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc()) - else: - if info['status'] == 200: - module.exit_json(changed=True) - else: - module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py b/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py deleted file mode 100644 index ec43b60a..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Anders Ingemann -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: sensu_check -short_description: Manage Sensu checks -description: - - Manage the checks that should be run on a machine 
by I(Sensu). - - Most options do not have a default and will not be added to the check definition unless specified. - - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module, - - they are simply specified for your convenience. -options: - name: - type: str - description: - - The name of the check - - This is the key that is used to determine whether a check exists - required: true - state: - type: str - description: - - Whether the check should be present or not - choices: [ 'present', 'absent' ] - default: present - path: - type: str - description: - - Path to the json file of the check to be added/removed. - - Will be created if it does not exist (unless I(state=absent)). - - The parent folders need to exist when I(state=present), otherwise an error will be thrown - default: /etc/sensu/conf.d/checks.json - backup: - description: - - Create a backup file (if yes), including the timestamp information so - - you can get the original file back if you somehow clobbered it incorrectly. - type: bool - default: 'no' - command: - type: str - description: - - Path to the sensu check to run (not required when I(state=absent)) - handlers: - type: list - elements: str - description: - - List of handlers to notify when the check fails - default: [] - subscribers: - type: list - elements: str - description: - - List of subscribers/channels this check should run for - - See sensu_subscribers to subscribe a machine to a channel - default: [] - interval: - type: int - description: - - Check interval in seconds - timeout: - type: int - description: - - Timeout for the check - - If not specified, it defaults to 10. - ttl: - type: int - description: - - Time to live in seconds until the check is considered stale - handle: - description: - - Whether the check should be handled or not - - Default is C(false). - type: bool - subdue_begin: - type: str - description: - - When to disable handling of check failures - subdue_end: - type: str - description: - - When to enable handling of check failures - dependencies: - type: list - elements: str - description: - - Other checks this check depends on, if dependencies fail handling of this check will be disabled - default: [] - metric: - description: - - Whether the check is a metric - type: bool - default: 'no' - standalone: - description: - - Whether the check should be scheduled by the sensu client or server - - This option obviates the need for specifying the I(subscribers) option - - Default is C(false). - type: bool - publish: - description: - - Whether the check should be scheduled at all. - - You can still issue it via the sensu api - - Default is C(false). - type: bool - occurrences: - type: int - description: - - Number of event occurrences before the handler should take action - - If not specified, defaults to 1. - refresh: - type: int - description: - - Number of seconds handlers should wait before taking second action - aggregate: - description: - - Classifies the check as an aggregate check, - - making it available via the aggregate API - - Default is C(false). - type: bool - low_flap_threshold: - type: int - description: - - The low threshold for flap detection - high_flap_threshold: - type: int - description: - - The high threshold for flap detection - custom: - type: dict - description: - - A hash/dictionary of custom parameters for mixing to the configuration. 
- - You can't rewrite others module parameters using this - default: {} - source: - type: str - description: - - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch). -author: "Anders Ingemann (@andsens)" -''' - -EXAMPLES = ''' -# Fetch metrics about the CPU load every 60 seconds, -# the sensu server has a handler called 'relay' which forwards stats to graphite -- name: Get cpu metrics - community.general.sensu_check: - name: cpu_load - command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb - metric: yes - handlers: relay - subscribers: common - interval: 60 - -# Check whether nginx is running -- name: Check nginx process - community.general.sensu_check: - name: nginx_running - command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid - handlers: default - subscribers: nginx - interval: 60 - -# Stop monitoring the disk capacity. -# Note that the check will still show up in the sensu dashboard, -# to remove it completely you need to issue a DELETE request to the sensu api. -- name: Check disk - community.general.sensu_check: - name: check_disk_capacity - state: absent -''' - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def sensu_check(module, path, name, state='present', backup=False): - changed = False - reasons = [] - - stream = None - try: - try: - stream = open(path, 'r') - config = json.load(stream) - except IOError as e: - if e.errno == 2: # File not found, non-fatal - if state == 'absent': - reasons.append('file did not exist and state is `absent\'') - return changed, reasons - config = {} - else: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except ValueError: - msg = '{path} contains invalid JSON'.format(path=path) - module.fail_json(msg=msg) - finally: - if stream: - stream.close() - - if 'checks' not in config: - if state == 'absent': - reasons.append('`checks\' section did not exist and state is `absent\'') - return changed, reasons - config['checks'] = {} - changed = True - reasons.append('`checks\' section did not exist') - - if state == 'absent': - if name in config['checks']: - del config['checks'][name] - changed = True - reasons.append('check was present and state is `absent\'') - - if state == 'present': - if name not in config['checks']: - check = {} - config['checks'][name] = check - changed = True - reasons.append('check was absent and state is `present\'') - else: - check = config['checks'][name] - simple_opts = ['command', - 'handlers', - 'subscribers', - 'interval', - 'timeout', - 'ttl', - 'handle', - 'dependencies', - 'standalone', - 'publish', - 'occurrences', - 'refresh', - 'aggregate', - 'low_flap_threshold', - 'high_flap_threshold', - 'source', - ] - for opt in simple_opts: - if module.params[opt] is not None: - if opt not in check or check[opt] != module.params[opt]: - check[opt] = module.params[opt] - changed = True - reasons.append('`{opt}\' did not exist or was different'.format(opt=opt)) - else: - if opt in check: - del check[opt] - changed = True - reasons.append('`{opt}\' was removed'.format(opt=opt)) - - if module.params['custom']: - # Convert to json - custom_params = module.params['custom'] - overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']) - if overwrited_fields: - msg = 'You can\'t overwriting standard module parameters via "custom". 
You are trying overwrite: {opt}'.format(opt=list(overwrited_fields)) - module.fail_json(msg=msg) - - for k, v in custom_params.items(): - if k in config['checks'][name]: - if not config['checks'][name][k] == v: - changed = True - reasons.append('`custom param {opt}\' was changed'.format(opt=k)) - else: - changed = True - reasons.append('`custom param {opt}\' was added'.format(opt=k)) - check[k] = v - simple_opts += custom_params.keys() - - # Remove obsolete custom params - for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']): - changed = True - reasons.append('`custom param {opt}\' was deleted'.format(opt=opt)) - del check[opt] - - if module.params['metric']: - if 'type' not in check or check['type'] != 'metric': - check['type'] = 'metric' - changed = True - reasons.append('`type\' was not defined or not `metric\'') - if not module.params['metric'] and 'type' in check: - del check['type'] - changed = True - reasons.append('`type\' was defined') - - if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: - subdue = {'begin': module.params['subdue_begin'], - 'end': module.params['subdue_end'], - } - if 'subdue' not in check or check['subdue'] != subdue: - check['subdue'] = subdue - changed = True - reasons.append('`subdue\' did not exist or was different') - else: - if 'subdue' in check: - del check['subdue'] - changed = True - reasons.append('`subdue\' was removed') - - if changed and not module.check_mode: - if backup: - module.backup_local(path) - try: - try: - stream = open(path, 'w') - stream.write(json.dumps(config, indent=2) + '\n') - except IOError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - finally: - if stream: - stream.close() - - return changed, reasons - - -def main(): - - arg_spec = {'name': {'type': 'str', 'required': True}, - 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, - 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, - 'backup': {'type': 'bool', 'default': 'no'}, - 'command': {'type': 'str'}, - 'handlers': {'type': 'list', 'elements': 'str'}, - 'subscribers': {'type': 'list', 'elements': 'str'}, - 'interval': {'type': 'int'}, - 'timeout': {'type': 'int'}, - 'ttl': {'type': 'int'}, - 'handle': {'type': 'bool'}, - 'subdue_begin': {'type': 'str'}, - 'subdue_end': {'type': 'str'}, - 'dependencies': {'type': 'list', 'elements': 'str'}, - 'metric': {'type': 'bool', 'default': 'no'}, - 'standalone': {'type': 'bool'}, - 'publish': {'type': 'bool'}, - 'occurrences': {'type': 'int'}, - 'refresh': {'type': 'int'}, - 'aggregate': {'type': 'bool'}, - 'low_flap_threshold': {'type': 'int'}, - 'high_flap_threshold': {'type': 'int'}, - 'custom': {'type': 'dict'}, - 'source': {'type': 'str'}, - } - - required_together = [['subdue_begin', 'subdue_end']] - - module = AnsibleModule(argument_spec=arg_spec, - required_together=required_together, - supports_check_mode=True) - if module.params['state'] != 'absent' and module.params['command'] is None: - module.fail_json(msg="missing required arguments: %s" % ",".join(['command'])) - - path = module.params['path'] - name = module.params['name'] - state = module.params['state'] - backup = module.params['backup'] - - changed, reasons = sensu_check(module, path, name, state, backup) - - module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) - - -if __name__ == '__main__': - main() diff --git 
a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py b/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py deleted file mode 100644 index 886c398e..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Red Hat Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sensu_client -author: "David Moreau Simard (@dmsimard)" -short_description: Manages Sensu client configuration -description: - - Manages Sensu client configuration. - - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)' -options: - state: - type: str - description: - - Whether the client should be present or not - choices: [ 'present', 'absent' ] - default: present - name: - type: str - description: - - A unique name for the client. The name cannot contain special characters or spaces. - - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu). - address: - type: str - description: - - An address to help identify and reach the client. This is only informational, usually an IP address or hostname. - - If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu). - subscriptions: - type: list - elements: str - description: - - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver). - - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions. - - The subscriptions array items must be strings. - safe_mode: - description: - - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check. - type: bool - default: 'no' - redact: - type: list - elements: str - description: - - Client definition attributes to redact (values) when logging and sending client keepalives. - socket: - type: dict - description: - - The socket definition scope, used to configure the Sensu client socket. - keepalives: - description: - - If Sensu should monitor keepalives for this client. - type: bool - default: 'yes' - keepalive: - type: dict - description: - - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc). - registration: - type: dict - description: - - The registration definition scope, used to configure Sensu registration event handlers. - deregister: - description: - - If a deregistration event should be created upon Sensu client process stop. - - Default is C(false). - type: bool - deregistration: - type: dict - description: - - The deregistration definition scope, used to configure automated Sensu client de-registration. - ec2: - type: dict - description: - - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only). - chef: - type: dict - description: - - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only). 
- puppet: - type: dict - description: - - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only). - servicenow: - type: dict - description: - - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only). -notes: - - Check mode is supported -''' - -EXAMPLES = ''' -# Minimum possible configuration -- name: Configure Sensu client - community.general.sensu_client: - subscriptions: - - default - -# With customization -- name: Configure Sensu client - community.general.sensu_client: - name: "{{ ansible_fqdn }}" - address: "{{ ansible_default_ipv4['address'] }}" - subscriptions: - - default - - webserver - redact: - - password - socket: - bind: 127.0.0.1 - port: 3030 - keepalive: - thresholds: - warning: 180 - critical: 300 - handlers: - - email - custom: - - broadcast: irc - occurrences: 3 - register: client - notify: - - Restart sensu-client - -- name: Secure Sensu client configuration file - ansible.builtin.file: - path: "{{ client['file'] }}" - owner: "sensu" - group: "sensu" - mode: "0600" - -- name: Delete the Sensu client configuration - community.general.sensu_client: - state: "absent" -''' - -RETURN = ''' -config: - description: Effective client configuration, when state is present - returned: success - type: dict - sample: {'name': 'client', 'subscriptions': ['default']} -file: - description: Path to the client configuration file - returned: success - type: str - sample: "/etc/sensu/conf.d/client.json" -''' - -import json -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - argument_spec=dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - name=dict(type='str', ), - address=dict(type='str', ), - subscriptions=dict(type='list', elements="str"), - safe_mode=dict(type='bool', default=False), - redact=dict(type='list', elements="str"), - socket=dict(type='dict'), - keepalives=dict(type='bool', default=True), - keepalive=dict(type='dict'), - registration=dict(type='dict'), - deregister=dict(type='bool'), - deregistration=dict(type='dict'), - ec2=dict(type='dict'), - chef=dict(type='dict'), - puppet=dict(type='dict'), - servicenow=dict(type='dict') - ), - required_if=[ - ['state', 'present', ['subscriptions']] - ] - ) - - state = module.params['state'] - path = "/etc/sensu/conf.d/client.json" - - if state == 'absent': - if os.path.exists(path): - if module.check_mode: - msg = '{path} would have been deleted'.format(path=path) - module.exit_json(msg=msg, changed=True) - else: - try: - os.remove(path) - msg = '{path} deleted successfully'.format(path=path) - module.exit_json(msg=msg, changed=True) - except OSError as e: - msg = 'Exception when trying to delete {path}: {exception}' - module.fail_json( - msg=msg.format(path=path, exception=str(e))) - else: - # Idempotency: it's okay if the file doesn't exist - msg = '{path} already does not exist'.format(path=path) - module.exit_json(msg=msg) - - # Build client configuration from module arguments - config = {'client': {}} - args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact', - 'socket', 'keepalives', 'keepalive', 'registration', 'deregister', - 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow'] - - for arg in args: - if arg in module.params and module.params[arg] is not None: - config['client'][arg] = module.params[arg] - - # Load the current config, if there is one, so we can compare 
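# The idempotency test below is a plain dict comparison: json.load() parses
# the existing file and, if the result equals the freshly built config, the
# module exits early without a write. Formatting differences in the file do
# not matter here, because only the parsed structures are compared, never
# the raw file text.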
- current_config = None - try: - current_config = json.load(open(path, 'r')) - except (IOError, ValueError): - # File either doesn't exist or it's invalid JSON - pass - - if current_config is not None and current_config == config: - # Config is the same, let's not change anything - module.exit_json(msg='Client configuration is already up to date', - config=config['client'], - file=path) - - # Validate that directory exists before trying to write to it - if not module.check_mode and not os.path.exists(os.path.dirname(path)): - try: - os.makedirs(os.path.dirname(path)) - except OSError as e: - module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), - str(e))) - - if module.check_mode: - module.exit_json(msg='Client configuration would have been updated', - changed=True, - config=config['client'], - file=path) - - try: - with open(path, 'w') as client: - client.write(json.dumps(config, indent=4)) - module.exit_json(msg='Client configuration updated', - changed=True, - config=config['client'], - file=path) - except (OSError, IOError) as e: - module.fail_json(msg='Unable to write file {0}: {1}'.format(path, - str(e))) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py b/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py deleted file mode 100644 index 65114798..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py +++ /dev/null @@ -1,273 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Red Hat Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sensu_handler -author: "David Moreau Simard (@dmsimard)" -short_description: Manages Sensu handler configuration -description: - - Manages Sensu handler configuration - - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)' -options: - state: - type: str - description: - - Whether the handler should be present or not - choices: [ 'present', 'absent' ] - default: present - name: - type: str - description: - - A unique name for the handler. The name cannot contain special characters or spaces. - required: True - type: - type: str - description: - - The handler type - choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ] - filter: - type: str - description: - - The Sensu event filter (name) to use when filtering events for the handler. - filters: - type: list - elements: str - description: - - An array of Sensu event filters (names) to use when filtering events for the handler. - - Each array item must be a string. - severities: - type: list - elements: str - description: - - An array of check result severities the handler will handle. - - 'NOTE: event resolution bypasses this filtering.' - - "Example: [ 'warning', 'critical', 'unknown' ]." - mutator: - type: str - description: - - The Sensu event mutator (name) to use to mutate event data for the handler. - timeout: - type: int - description: - - The handler execution duration timeout in seconds (hard stop). - - Only used by pipe and tcp handler types. - default: 10 - handle_silenced: - description: - - If events matching one or more silence entries should be handled. 
- type: bool - default: 'no' - handle_flapping: - description: - - If events in the flapping state should be handled. - type: bool - default: 'no' - command: - type: str - description: - - The handler command to be executed. - - The event data is passed to the process via STDIN. - - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").' - socket: - type: dict - description: - - The socket definition scope, used to configure the TCP/UDP handler socket. - - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").' - pipe: - type: dict - description: - - The pipe definition scope, used to configure the Sensu transport pipe. - - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").' - handlers: - type: list - elements: str - description: - - An array of Sensu event handlers (names) to use for events using the handler set. - - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").' -notes: - - Check mode is supported -''' - -EXAMPLES = ''' -# Configure a handler that sends event data as STDIN (pipe) -- name: Configure IRC Sensu handler - community.general.sensu_handler: - name: "irc_handler" - type: "pipe" - command: "/usr/local/bin/notify-irc.sh" - severities: - - "ok" - - "critical" - - "warning" - - "unknown" - timeout: 15 - notify: - - Restart sensu-client - - Restart sensu-server - -# Delete a handler -- name: Delete IRC Sensu handler - community.general.sensu_handler: - name: "irc_handler" - state: "absent" - -# Example of a TCP handler -- name: Configure TCP Sensu handler - community.general.sensu_handler: - name: "tcp_handler" - type: "tcp" - timeout: 30 - socket: - host: "10.0.1.99" - port: 4444 - register: handler - notify: - - Restart sensu-client - - Restart sensu-server - -- name: Secure Sensu handler configuration file - ansible.builtin.file: - path: "{{ handler['file'] }}" - owner: "sensu" - group: "sensu" - mode: "0600" -''' - -RETURN = ''' -config: - description: Effective handler configuration, when state is present - returned: success - type: dict - sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'} -file: - description: Path to the handler configuration file - returned: success - type: str - sample: "/etc/sensu/conf.d/handlers/irc.json" -name: - description: Name of the handler - returned: success - type: str - sample: "irc" -''' - -import json -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - argument_spec=dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - name=dict(type='str', required=True), - type=dict(type='str', choices=['pipe', 'tcp', 'udp', 'transport', 'set']), - filter=dict(type='str'), - filters=dict(type='list', elements='str'), - severities=dict(type='list', elements='str'), - mutator=dict(type='str'), - timeout=dict(type='int', default=10), - handle_silenced=dict(type='bool', default=False), - handle_flapping=dict(type='bool', default=False), - command=dict(type='str'), - socket=dict(type='dict'), - pipe=dict(type='dict'), - handlers=dict(type='list', elements='str'), - ), - required_if=[ - ['state', 'present', ['type']], - ['type', 'pipe', ['command']], - ['type', 'tcp', ['socket']], - ['type', 'udp', ['socket']], - ['type', 'transport', ['pipe']], - ['type', 
'set', ['handlers']] - ] - ) - - state = module.params['state'] - name = module.params['name'] - path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name) - - if state == 'absent': - if os.path.exists(path): - if module.check_mode: - msg = '{path} would have been deleted'.format(path=path) - module.exit_json(msg=msg, changed=True) - else: - try: - os.remove(path) - msg = '{path} deleted successfully'.format(path=path) - module.exit_json(msg=msg, changed=True) - except OSError as e: - msg = 'Exception when trying to delete {path}: {exception}' - module.fail_json( - msg=msg.format(path=path, exception=str(e))) - else: - # Idempotency: it's okay if the file doesn't exist - msg = '{path} already does not exist'.format(path=path) - module.exit_json(msg=msg) - - # Build handler configuration from module arguments - config = {'handlers': {name: {}}} - args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout', - 'handle_silenced', 'handle_flapping', 'command', 'socket', - 'pipe', 'handlers'] - - for arg in args: - if arg in module.params and module.params[arg] is not None: - config['handlers'][name][arg] = module.params[arg] - - # Load the current config, if there is one, so we can compare - current_config = None - try: - current_config = json.load(open(path, 'r')) - except (IOError, ValueError): - # File either doesn't exist or it's invalid JSON - pass - - if current_config is not None and current_config == config: - # Config is the same, let's not change anything - module.exit_json(msg='Handler configuration is already up to date', - config=config['handlers'][name], - file=path, - name=name) - - # Validate that directory exists before trying to write to it - if not module.check_mode and not os.path.exists(os.path.dirname(path)): - try: - os.makedirs(os.path.dirname(path)) - except OSError as e: - module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), - str(e))) - - if module.check_mode: - module.exit_json(msg='Handler configuration would have been updated', - changed=True, - config=config['handlers'][name], - file=path, - name=name) - - try: - with open(path, 'w') as handler: - handler.write(json.dumps(config, indent=4)) - module.exit_json(msg='Handler configuration updated', - changed=True, - config=config['handlers'][name], - file=path, - name=name) - except (OSError, IOError) as e: - module.fail_json(msg='Unable to write file {0}: {1}'.format(path, - str(e))) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py b/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py deleted file mode 100644 index 80a52167..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Steven Bambling -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: sensu_silence -author: Steven Bambling (@smbambling) -short_description: Manage Sensu silence entries -description: - - Create and clear (delete) a silence entries via the Sensu API - for subscriptions and checks. -options: - check: - type: str - description: - - Specifies the check which the silence entry applies to. - creator: - type: str - description: - - Specifies the entity responsible for this entry. 
- expire: - type: int - description: - - If specified, the silence entry will be automatically cleared - after this number of seconds. - expire_on_resolve: - description: - - If specified as true, the silence entry will be automatically - cleared once the condition it is silencing is resolved. - type: bool - reason: - type: str - description: - - If specified, this free-form string is used to provide context or - rationale for the reason this silence entry was created. - state: - type: str - description: - - Specifies to create or clear (delete) a silence entry via the Sensu API - default: present - choices: ['present', 'absent'] - subscription: - type: str - description: - - Specifies the subscription which the silence entry applies to. - - To create a silence entry for a client prepend C(client:) to client name. - Example - C(client:server1.example.dev) - required: true - url: - type: str - description: - - Specifies the URL of the Sensu monitoring host server. - required: false - default: http://127.0.01:4567 -''' - -EXAMPLES = ''' -# Silence ALL checks for a given client -- name: Silence server1.example.dev - community.general.sensu_silence: - subscription: client:server1.example.dev - creator: "{{ ansible_user_id }}" - reason: Performing maintenance - -# Silence specific check for a client -- name: Silence CPU_Usage check for server1.example.dev - community.general.sensu_silence: - subscription: client:server1.example.dev - check: CPU_Usage - creator: "{{ ansible_user_id }}" - reason: Investigation alert issue - -# Silence multiple clients from a dict - silence: - server1.example.dev: - reason: 'Deployment in progress' - server2.example.dev: - reason: 'Deployment in progress' - -- name: Silence several clients from a dict - community.general.sensu_silence: - subscription: "client:{{ item.key }}" - reason: "{{ item.value.reason }}" - creator: "{{ ansible_user_id }}" - with_dict: "{{ silence }}" -''' - -RETURN = ''' -''' - -import json - -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def query(module, url, check, subscription): - headers = { - 'Content-Type': 'application/json', - } - - url = url + '/silenced' - - request_data = { - 'check': check, - 'subscription': subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url( - module, url, method='GET', - headers=headers, data=json.dumps(request_data) - ) - - if info['status'] == 500: - module.fail_json( - msg="Failed to query silence %s. 
Reason: %s" % (subscription, info) - ) - - try: - json_out = json.loads(to_native(response.read())) - except Exception: - json_out = "" - - return False, json_out, False - - -def clear(module, url, check, subscription): - # Test if silence exists before clearing - (rc, out, changed) = query(module, url, check, subscription) - - d = dict((i['subscription'], i['check']) for i in out) - subscription_exists = subscription in d - if check and subscription_exists: - exists = (check == d[subscription]) - else: - exists = subscription_exists - - # If check/subscription doesn't exist - # exit with changed state of False - if not exists: - return False, out, changed - - # module.check_mode is inherited from the AnsibleMOdule class - if not module.check_mode: - headers = { - 'Content-Type': 'application/json', - } - - url = url + '/silenced/clear' - - request_data = { - 'check': check, - 'subscription': subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url( - module, url, method='POST', - headers=headers, data=json.dumps(request_data) - ) - - if info['status'] != 204: - module.fail_json( - msg="Failed to silence %s. Reason: %s" % (subscription, info) - ) - - try: - json_out = json.loads(to_native(response.read())) - except Exception: - json_out = "" - - return False, json_out, True - return False, out, True - - -def create( - module, url, check, creator, expire, - expire_on_resolve, reason, subscription): - (rc, out, changed) = query(module, url, check, subscription) - for i in out: - if (i['subscription'] == subscription): - if ( - (check is None or check == i['check']) and - ( - creator == '' or - creator == i['creator']) and - ( - reason == '' or - reason == i['reason']) and - ( - expire is None or expire == i['expire']) and - ( - expire_on_resolve is None or - expire_on_resolve == i['expire_on_resolve'] - ) - ): - return False, out, False - - # module.check_mode is inherited from the AnsibleMOdule class - if not module.check_mode: - headers = { - 'Content-Type': 'application/json', - } - - url = url + '/silenced' - - request_data = { - 'check': check, - 'creator': creator, - 'expire': expire, - 'expire_on_resolve': expire_on_resolve, - 'reason': reason, - 'subscription': subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url( - module, url, method='POST', - headers=headers, data=json.dumps(request_data) - ) - - if info['status'] != 201: - module.fail_json( - msg="Failed to silence %s. 
Reason: %s" % - (subscription, info['msg']) - ) - - try: - json_out = json.loads(to_native(response.read())) - except Exception: - json_out = "" - - return False, json_out, True - return False, out, True - - -def main(): - module = AnsibleModule( - argument_spec=dict( - check=dict(required=False), - creator=dict(required=False), - expire=dict(type='int', required=False), - expire_on_resolve=dict(type='bool', required=False), - reason=dict(required=False), - state=dict(default='present', choices=['present', 'absent']), - subscription=dict(required=True), - url=dict(required=False, default='http://127.0.01:4567'), - ), - supports_check_mode=True - ) - - url = module.params['url'] - check = module.params['check'] - creator = module.params['creator'] - expire = module.params['expire'] - expire_on_resolve = module.params['expire_on_resolve'] - reason = module.params['reason'] - subscription = module.params['subscription'] - state = module.params['state'] - - if state == 'present': - (rc, out, changed) = create( - module, url, check, creator, - expire, expire_on_resolve, reason, subscription - ) - - if state == 'absent': - (rc, out, changed) = clear(module, url, check, subscription) - - if rc != 0: - module.fail_json(msg="failed", result=out) - module.exit_json(msg="success", result=out, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py b/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py deleted file mode 100644 index 947c6e0d..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Anders Ingemann -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: sensu_subscription -short_description: Manage Sensu subscriptions -description: - - Manage which I(sensu channels) a machine should subscribe to -options: - name: - type: str - description: - - The name of the channel - required: true - state: - type: str - description: - - Whether the machine should subscribe or unsubscribe from the channel - choices: [ 'present', 'absent' ] - required: false - default: present - path: - type: str - description: - - Path to the subscriptions json file - required: false - default: /etc/sensu/conf.d/subscriptions.json - backup: - description: - - Create a backup file (if yes), including the timestamp information so you - - can get the original file back if you somehow clobbered it incorrectly. 
- type: bool - required: false - default: no -requirements: [ ] -author: Anders Ingemann (@andsens) -''' - -RETURN = ''' -reasons: - description: the reasons why the module changed or did not change something - returned: success - type: list - sample: ["channel subscription was absent and state is `present'"] -''' - -EXAMPLES = ''' -# Subscribe to the nginx channel -- name: Subscribe to nginx checks - community.general.sensu_subscription: name=nginx - -# Unsubscribe from the common checks channel -- name: Unsubscribe from common checks - community.general.sensu_subscription: name=common state=absent -''' - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def sensu_subscription(module, path, name, state='present', backup=False): - changed = False - reasons = [] - - try: - config = json.load(open(path)) - except IOError as e: - if e.errno == 2: # File not found, non-fatal - if state == 'absent': - reasons.append('file did not exist and state is `absent\'') - return changed, reasons - config = {} - else: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except ValueError: - msg = '{path} contains invalid JSON'.format(path=path) - module.fail_json(msg=msg) - - if 'client' not in config: - if state == 'absent': - reasons.append('`client\' did not exist and state is `absent\'') - return changed, reasons - config['client'] = {} - changed = True - reasons.append('`client\' did not exist') - - if 'subscriptions' not in config['client']: - if state == 'absent': - reasons.append('`client.subscriptions\' did not exist and state is `absent\'') - return changed, reasons - config['client']['subscriptions'] = [] - changed = True - reasons.append('`client.subscriptions\' did not exist') - - if name not in config['client']['subscriptions']: - if state == 'absent': - reasons.append('channel subscription was absent') - return changed, reasons - config['client']['subscriptions'].append(name) - changed = True - reasons.append('channel subscription was absent and state is `present\'') - else: - if state == 'absent': - config['client']['subscriptions'].remove(name) - changed = True - reasons.append('channel subscription was present and state is `absent\'') - - if changed and not module.check_mode: - if backup: - module.backup_local(path) - try: - open(path, 'w').write(json.dumps(config, indent=2) + '\n') - except IOError as e: - module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)), - exception=traceback.format_exc()) - - return changed, reasons - - -def main(): - arg_spec = {'name': {'type': 'str', 'required': True}, - 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'}, - 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, - 'backup': {'type': 'bool', 'default': 'no'}, - } - - module = AnsibleModule(argument_spec=arg_spec, - supports_check_mode=True) - - path = module.params['path'] - name = module.params['name'] - state = module.params['state'] - backup = module.params['backup'] - - changed, reasons = sensu_subscription(module, path, name, state, backup) - - module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py b/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py deleted file mode 100644 index 
77e3b153..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py +++ /dev/null @@ -1,332 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Renato Orgito -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: spectrum_device -short_description: Creates/deletes devices in CA Spectrum. -description: - - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). - - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1 -author: "Renato Orgito (@orgito)" -options: - device: - type: str - aliases: [ host, name ] - required: true - description: - - IP address of the device. - - If a hostname is given, it will be resolved to the IP address. - community: - type: str - description: - - SNMP community used for device discovery. - - Required when C(state=present). - required: true - landscape: - type: str - required: true - description: - - Landscape handle of the SpectroServer to which add or remove the device. - state: - type: str - required: false - description: - - On C(present) creates the device when it does not exist. - - On C(absent) removes the device when it exists. - choices: ['present', 'absent'] - default: 'present' - url: - type: str - aliases: [ oneclick_url ] - required: true - description: - - HTTP, HTTPS URL of the Oneclick server in the form (http|https)://host.domain[:port] - url_username: - type: str - aliases: [ oneclick_user ] - required: true - description: - - Oneclick user name. - url_password: - type: str - aliases: [ oneclick_password ] - required: true - description: - - Oneclick user password. - use_proxy: - required: false - description: - - if C(no), it will not use a proxy, even if one is defined in an environment - variable on the target hosts. - default: 'yes' - type: bool - validate_certs: - required: false - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - default: 'yes' - type: bool - agentport: - type: int - required: false - description: - - UDP port used for SNMP discovery. - default: 161 -notes: - - The devices will be created inside the I(Universe) container of the specified landscape. - - All the operations will be performed only on the specified landscape. 
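The handle arithmetic the module code below relies on is easiest to see with concrete numbers: each SpectroServer landscape spans a 0x100000-wide block of model handles, so a model's landscape handle is its model handle rounded down to that boundary. A minimal sketch (landscape_of is a hypothetical name for the expression the module inlines):

def landscape_of(model_handle):
    # Round the model handle down to the nearest 0x100000 boundary.
    return "0x%x" % (int(model_handle, 16) // 0x100000 * 0x100000)

print(landscape_of("0x1007ab"))  # -> 0x100000, matching the RETURN sample below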
-''' - -EXAMPLES = ''' -- name: Add device to CA Spectrum - local_action: - module: spectrum_device - device: '{{ ansible_host }}' - community: secret - landscape: '0x100000' - oneclick_url: http://oneclick.example.com:8080 - oneclick_user: username - oneclick_password: password - state: present - - -- name: Remove device from CA Spectrum - local_action: - module: spectrum_device - device: '{{ ansible_host }}' - landscape: '{{ landscape_handle }}' - oneclick_url: http://oneclick.example.com:8080 - oneclick_user: username - oneclick_password: password - use_proxy: no - state: absent -''' - -RETURN = ''' -device: - description: device data when state = present - returned: success - type: dict - sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'} -''' - -from socket import gethostbyname, gaierror -import xml.etree.ElementTree as ET - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def request(resource, xml=None, method=None): - headers = { - "Content-Type": "application/xml", - "Accept": "application/xml" - } - - url = module.params['oneclick_url'] + '/spectrum/restful/' + resource - - response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45) - - if info['status'] == 401: - module.fail_json(msg="failed to authenticate to Oneclick server") - - if info['status'] not in (200, 201, 204): - module.fail_json(msg=info['msg']) - - return response.read() - - -def post(resource, xml=None): - return request(resource, xml=xml, method='POST') - - -def delete(resource): - return request(resource, xml=None, method='DELETE') - - -def get_ip(): - try: - device_ip = gethostbyname(module.params.get('device')) - except gaierror: - module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device')) - - return device_ip - - -def get_device(device_ip): - """Query OneClick for the device using the IP Address""" - resource = '/models' - landscape_min = "0x%x" % int(module.params.get('landscape'), 16) - landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000) - - xml = """ - - - - - - - - - SearchManager - - - - {mh_min} - - - - - {mh_max} - - - - - FIND_DEV_MODELS_BY_IP - - {search_ip} - - - - - - - - """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max) - - result = post(resource, xml=xml) - - root = ET.fromstring(result) - - if root.get('total-models') == '0': - return None - - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') - - # get the first device - model = root.find('ca:model-responses', namespace).find('ca:model', namespace) - - if model.get('error'): - module.fail_json(msg="error checking device: %s" % model.get('error')) - - # get the attributes - model_handle = model.get('mh') - - model_address = model.find('./*[@id="0x12d7f"]').text - - # derive the landscape handler from the model handler of the device - model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000) - - device = dict( - model_handle=model_handle, - address=model_address, - landscape=model_landscape) - - return device - - -def add_device(): - device_ip = get_ip() - device = get_device(device_ip) - - if device: - module.exit_json(changed=False, device=device) - - if module.check_mode: - device = dict( - model_handle=None, - address=device_ip, - landscape="0x%x" % int(module.params.get('landscape'), 16)) - module.exit_json(changed=True, device=device) - - resource = 'model?ipaddress=' + device_ip + 
'&commstring=' + module.params.get('community') - resource += '&landscapeid=' + module.params.get('landscape') - - if module.params.get('agentport', None): - resource += '&agentport=' + str(module.params.get('agentport', 161)) - - result = post(resource) - root = ET.fromstring(result) - - if root.get('error') != 'Success': - module.fail_json(msg=root.get('error-message')) - - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') - model = root.find('ca:model', namespace) - - model_handle = model.get('mh') - model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000) - - device = dict( - model_handle=model_handle, - address=device_ip, - landscape=model_landscape, - ) - - module.exit_json(changed=True, device=device) - - -def remove_device(): - device_ip = get_ip() - device = get_device(device_ip) - - if device is None: - module.exit_json(changed=False) - - if module.check_mode: - module.exit_json(changed=True) - - resource = '/model/' + device['model_handle'] - result = delete(resource) - - root = ET.fromstring(result) - - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') - error = root.find('ca:error', namespace).text - - if error != 'Success': - error_message = root.find('ca:error-message', namespace).text - module.fail_json(msg="%s %s" % (error, error_message)) - - module.exit_json(changed=True) - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - device=dict(required=True, aliases=['host', 'name']), - landscape=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ? - agentport=dict(type='int', default=161), - url=dict(required=True, aliases=['oneclick_url']), - url_username=dict(required=True, aliases=['oneclick_user']), - url_password=dict(required=True, no_log=True, aliases=['oneclick_password']), - use_proxy=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), - ), - required_if=[('state', 'present', ['community'])], - supports_check_mode=True - ) - - if module.params.get('state') == 'present': - add_device() - else: - remove_device() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/spectrum_model_attrs.py b/ansible_collections/community/general/plugins/modules/monitoring/spectrum_model_attrs.py deleted file mode 100644 index 231352ac..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/spectrum_model_attrs.py +++ /dev/null @@ -1,528 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2021, Tyler Gates -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: spectrum_model_attrs -short_description: Enforce a model's attributes in CA Spectrum. -description: - - This module can be used to enforce a model's attributes in CA Spectrum. -version_added: 2.5.0 -author: - - Tyler Gates (@tgates81) -notes: - - Tested on CA Spectrum version 10.4.2.0.189. - - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. -requirements: - - 'python >= 2.7' -options: - url: - description: - - URL of OneClick server. - type: str - required: true - url_username: - description: - - OneClick username. 
- type: str - required: true - aliases: [username] - url_password: - description: - - OneClick password. - type: str - required: true - aliases: [password] - use_proxy: - description: - - if C(no), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. - default: yes - required: false - type: bool - name: - description: - - Model name. - type: str - required: true - type: - description: - - Model type. - type: str - required: true - validate_certs: - description: - - Validate SSL certificates. Only change this to C(false) if you can guarantee that you are talking to the correct endpoint and there is no - man-in-the-middle attack happening. - type: bool - default: yes - required: false - attributes: - description: - - A list of attribute names and values to enforce. - - All values and parameters are case sensitive and must be provided as strings only. - required: true - type: list - elements: dict - suboptions: - name: - description: - - Attribute name OR hex ID. - - 'Currently defined names are:' - - ' C(App_Manufacturer) (C(0x230683))' - - ' C(CollectionsModelNameString) (C(0x12adb))' - - ' C(Condition) (C(0x1000a))' - - ' C(Criticality) (C(0x1290c))' - - ' C(DeviceType) (C(0x23000e))' - - ' C(isManaged) (C(0x1295d))' - - ' C(Model_Class) (C(0x11ee8))' - - ' C(Model_Handle) (C(0x129fa))' - - ' C(Model_Name) (C(0x1006e))' - - ' C(Modeltype_Handle) (C(0x10001))' - - ' C(Modeltype_Name) (C(0x10000))' - - ' C(Network_Address) (C(0x12d7f))' - - ' C(Notes) (C(0x11564))' - - ' C(ServiceDesk_Asset_ID) (C(0x12db9))' - - ' C(TopologyModelNameString) (C(0x129e7))' - - ' C(sysDescr) (C(0x10052))' - - ' C(sysName) (C(0x10b5b))' - - ' C(Vendor_Name) (C(0x11570))' - - ' C(Description) (C(0x230017))' - - Hex IDs are the direct identifiers in Spectrum and will always work. - - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' - type: str - required: true - value: - description: - - Attribute value. Empty strings should be C("") or C(null). - type: str - required: true -''' - -EXAMPLES = r''' -- name: Enforce maintenance mode for modelxyz01 with a note about why - community.general.spectrum_model_attrs: - url: "http://oneclick.url.com" - username: "{{ oneclick_username }}" - password: "{{ oneclick_password }}" - name: "modelxyz01" - type: "Host_Device" - validate_certs: true - attributes: - - name: "isManaged" - value: "false" - - name: "Notes" - value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}" - delegate_to: localhost - register: spectrum_model_attrs_status -''' - -RETURN = r''' -msg: - description: Informational message on the job result. - type: str - returned: always - sample: 'Success' -changed_attrs: - description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. 
- type: dict - returned: always - sample: { - "Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", - "isManaged": "true" - } -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import quote -import json -import re -import xml.etree.ElementTree as ET - - -class spectrum_model_attrs: - def __init__(self, module): - self.module = module - self.url = module.params['url'] - # If the user did not define a full path to the restul space in url: - # params, add what we believe it to be. - if not re.search('\\/.+', self.url.split('://')[1]): - self.url = "%s/spectrum/restful" % self.url.rstrip('/') - # Align these with what is defined in OneClick's UI under: - # Locator -> Devices -> By Model Name -> -> - # Attributes tab. - self.attr_map = dict(App_Manufacturer=hex(0x230683), - CollectionsModelNameString=hex(0x12adb), - Condition=hex(0x1000a), - Criticality=hex(0x1290c), - DeviceType=hex(0x23000e), - isManaged=hex(0x1295d), - Model_Class=hex(0x11ee8), - Model_Handle=hex(0x129fa), - Model_Name=hex(0x1006e), - Modeltype_Handle=hex(0x10001), - Modeltype_Name=hex(0x10000), - Network_Address=hex(0x12d7f), - Notes=hex(0x11564), - ServiceDesk_Asset_ID=hex(0x12db9), - TopologyModelNameString=hex(0x129e7), - sysDescr=hex(0x10052), - sysName=hex(0x10b5b), - Vendor_Name=hex(0x11570), - Description=hex(0x230017)) - self.search_qualifiers = [ - "and", "or", "not", "greater-than", "greater-than-or-equals", - "less-than", "less-than-or-equals", "equals", "equals-ignore-case", - "does-not-equal", "does-not-equal-ignore-case", "has-prefix", - "does-not-have-prefix", "has-prefix-ignore-case", - "does-not-have-prefix-ignore-case", "has-substring", - "does-not-have-substring", "has-substring-ignore-case", - "does-not-have-substring-ignore-case", "has-suffix", - "does-not-have-suffix", "has-suffix-ignore-case", - "does-not-have-suffix-ignore-case", "has-pcre", - "has-pcre-ignore-case", "has-wildcard", "has-wildcard-ignore-case", - "is-derived-from", "not-is-derived-from"] - - self.resp_namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response") - - self.result = dict(msg="", changed_attrs=dict()) - self.success_msg = "Success" - - def build_url(self, path): - """ - Build a sane Spectrum restful API URL - :param path: The path to append to the restful base - :type path: str - :returns: Complete restful API URL - :rtype: str - """ - - return "%s/%s" % (self.url.rstrip('/'), path.lstrip('/')) - - def attr_id(self, name): - """ - Get attribute hex ID - :param name: The name of the attribute to retrieve the hex ID for - :type name: str - :returns: Translated hex ID of name, or None if no translation found - :rtype: str or None - """ - - try: - return self.attr_map[name] - except KeyError: - return None - - def attr_name(self, _id): - """ - Get attribute name from hex ID - :param _id: The hex ID to lookup a name for - :type _id: str - :returns: Translated name of hex ID, or None if no translation found - :rtype: str or None - """ - - for name, m_id in list(self.attr_map.items()): - if _id == m_id: - return name - return None - - def urlencode(self, string): - """ - URL Encode a string - :param: string: The string to URL encode - :type string: str - :returns: URL encode version of supplied string - :rtype: str - """ - - return quote(string, "<>%-_.!*'():?#/@&+,;=") - - def update_model(self, model_handle, attrs): - """ - 
-        Update a model's attributes
-        :param model_handle: The model's handle ID
-        :type model_handle: str
-        :param attrs: Model's attributes to update. {'<name>': '<value>'}
-        :type attrs: dict
-        :returns: Nothing; exits on error or updates self.result
-        :rtype: None
-        """
-
-        # Build the update URL
-        update_url = self.build_url("/model/%s?" % model_handle)
-        for name, val in list(attrs.items()):
-            if val is None:
-                # None values should be converted to empty strings
-                val = ""
-            val = self.urlencode(str(val))
-            if not update_url.endswith('?'):
-                update_url += "&"
-
-            update_url += "attr=%s&val=%s" % (self.attr_id(name) or name, val)
-
-        # PUT to /model to update the attributes, or fail.
-        resp, info = fetch_url(self.module, update_url, method="PUT",
-                               headers={"Content-Type": "application/json",
-                                        "Accept": "application/json"},
-                               use_proxy=self.module.params['use_proxy'])
-        status_code = info["status"]
-        if status_code >= 400:
-            body = info['body']
-        else:
-            body = "" if resp is None else resp.read()
-        if status_code != 200:
-            self.result['msg'] = "HTTP PUT error %s: %s: %s" % (status_code, update_url, body)
-            self.module.fail_json(**self.result)
-
-        # Load and parse the JSON response and either fail or set results.
-        json_resp = json.loads(body)
-        """
-        Example success response:
-            {'model-update-response-list':{'model-responses':{'model':{'@error':'Success','@mh':'0x1010e76','attribute':{'@error':'Success','@id':'0x1295d'}}}}}
-        Example failure response:
-            {'model-update-response-list': {'model-responses': {'model': {'@error': 'PartialFailure', '@mh': '0x1010e76', 'attribute': {'@error-message': 'brn0vlappua001: You do not have permission to set attribute Network_Address for this model.', '@error': 'Error', '@id': '0x12d7f'}}}}}
-        """  # noqa
-        model_resp = json_resp['model-update-response-list']['model-responses']['model']
-        if model_resp['@error'] != "Success":
-            # I'm not 100% confident on the expected failure structure so just
-            # dump all of ['attribute'].
-            self.result['msg'] = str(model_resp['attribute'])
-            self.module.fail_json(**self.result)
-
-        # Should be OK if we get to here, set results.
-        self.result['msg'] = self.success_msg
-        self.result['changed_attrs'].update(attrs)
-        self.result['changed'] = True
-
-    def find_model(self, search_criteria, ret_attrs=None):
-        """
-        Search for a model in /models
-        :param search_criteria: The XML <filtered-models> search criteria
-        :type search_criteria: str
-        :param ret_attrs: List of attributes by name or ID to return back
-                          (default is Model_Handle)
-        :type ret_attrs: list
-        :returns: Dictionary mapping of ret_attrs to values: {ret_attr: ret_val}
-        :rtype: dict
-        """
-
-        # If no return attributes were asked for, return Model_Handle.
-        if ret_attrs is None:
-            ret_attrs = ['Model_Handle']
-
-        # Set the XML <rs:requested-attribute id=""/> tags. If no hex ID
-        # is found for the name, assume it is already in hex. {name: hex ID}
-        rqstd_attrs = ""
-        for ra in ret_attrs:
-            _id = self.attr_id(ra) or ra
-            rqstd_attrs += '<rs:requested-attribute id="%s" />' % _id
-
-        # Build the complete XML search query for HTTP POST.
-        xml = """<rs:model-request
-    xmlns:rs="http://www.ca.com/spectrum/restful/schema/request">
-    <rs:target-models>
-        <rs:models-search>
-            <rs:search-criteria>
-                {0}
-            </rs:search-criteria>
-        </rs:models-search>
-    </rs:target-models>
-    {1}
-</rs:model-request>""".format(search_criteria, rqstd_attrs)
-
-        # POST to /models and fail on errors.
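        # A sketch of the request body above, under assumed example inputs:
        # with the default ret_attrs=['Model_Handle'], rqstd_attrs renders as
        #     <rs:requested-attribute id="0x129fa" />
        # and {0} carries the <filtered-models> criteria built by
        # find_model_by_name_type() below.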
-        url = self.build_url("/models")
-        resp, info = fetch_url(self.module, url, data=xml, method="POST",
-                               use_proxy=self.module.params['use_proxy'],
-                               headers={"Content-Type": "application/xml",
-                                        "Accept": "application/xml"})
-        status_code = info["status"]
-        if status_code >= 400:
-            body = info['body']
-        else:
-            body = "" if resp is None else resp.read()
-        if status_code != 200:
-            self.result['msg'] = "HTTP POST error %s: %s: %s" % (status_code, url, body)
-            self.module.fail_json(**self.result)
-
-        # Parse through the XML response and fail on any detected errors.
-        root = ET.fromstring(body)
-        total_models = int(root.attrib['total-models'])
-        error = root.attrib['error']
-        model_responses = root.find('ca:model-responses', self.resp_namespace)
-        if total_models < 1:
-            self.result['msg'] = "No models found matching search criteria `%s'" % search_criteria
-            self.module.fail_json(**self.result)
-        elif total_models > 1:
-            self.result['msg'] = "More than one model found (%s): `%s'" % (total_models, ET.tostring(model_responses,
-                                                                                                     encoding='unicode'))
-            self.module.fail_json(**self.result)
-        if error != "EndOfResults":
-            self.result['msg'] = "Unexpected search response `%s': %s" % (error, ET.tostring(model_responses,
-                                                                                             encoding='unicode'))
-            self.module.fail_json(**self.result)
-        model = model_responses.find('ca:model', self.resp_namespace)
-        attrs = model.findall('ca:attribute', self.resp_namespace)
-        if not attrs:
-            self.result['msg'] = "No attributes returned."
-            self.module.fail_json(**self.result)
-
-        # XML response should be successful. Iterate and set each returned
-        # attribute ID/name and value for return.
-        ret = dict()
-        for attr in attrs:
-            attr_id = attr.get('id')
-            attr_name = self.attr_name(attr_id)
-            # Note: all values except empty strings (None) are strings only!
-            attr_val = attr.text
-            key = attr_name if attr_name in ret_attrs else attr_id
-            ret[key] = attr_val
-            ret_attrs.remove(key)
-        return ret
-
-    def find_model_by_name_type(self, mname, mtype, ret_attrs=None):
-        """
-        Find a model by name and type
-        :param mname: Model name
-        :type mname: str
-        :param mtype: Model type
-        :type mtype: str
-        :param ret_attrs: List of attributes by name or ID to return back
-                          (default is Model_Handle)
-        :type ret_attrs: list
-        :returns: find_model(): Dictionary mapping of ret_attrs to values:
-                  {ret_attr: ret_val}
-        :rtype: dict
-        """
-
-        # If no return attributes were asked for, return Model_Handle.
-        if ret_attrs is None:
-            ret_attrs = ['Model_Handle']
-
-        """This is basically as follows:
-            <filtered-models>
-                <and>
-                    <equals>
-                        <attribute id="0x1006e">
-                            <value>mname</value>
-                        </attribute>
-                    </equals>
-                    <equals>
-                        <attribute id="0x10000">
-                            <value>mtype</value>
-                        </attribute>
-                    </equals>
-                </and>
-            </filtered-models>
-        """
-
-        # Parent filter tag
-        filtered_models = ET.Element('filtered-models')
-        # Logical AND
-        _and = ET.SubElement(filtered_models, 'and')
-
-        # Model Name
-        MN_equals = ET.SubElement(_and, 'equals')
-        Model_Name = ET.SubElement(MN_equals, 'attribute',
-                                   {'id': self.attr_map['Model_Name']})
-        MN_value = ET.SubElement(Model_Name, 'value')
-        MN_value.text = mname
-
-        # Model Type Name
-        MTN_equals = ET.SubElement(_and, 'equals')
-        Modeltype_Name = ET.SubElement(MTN_equals, 'attribute',
-                                       {'id': self.attr_map['Modeltype_Name']})
-        MTN_value = ET.SubElement(Modeltype_Name, 'value')
-        MTN_value.text = mtype
-
-        return self.find_model(ET.tostring(filtered_models,
-                                           encoding='unicode'),
-                               ret_attrs)
-
-    def ensure_model_attrs(self):
-
-        # Get a list of all requested attribute names/IDs plus Model_Handle and
-        # use them to query the values currently set. Store findings in a
-        # dictionary.
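        # A sketch with assumed example data: for
        # attributes=[{'name': 'isManaged', 'value': 'false'}], the lookup below
        # could return cur_attrs = {'isManaged': 'true', 'Model_Handle': '0x1010e76'},
        # after which the comparison loop hands {'isManaged': 'false'} to
        # update_model().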
- req_attrs = [] - for attr in self.module.params['attributes']: - req_attrs.append(attr['name']) - if 'Model_Handle' not in req_attrs: - req_attrs.append('Model_Handle') - - # Survey attributes currently set and store in a dict. - cur_attrs = self.find_model_by_name_type(self.module.params['name'], - self.module.params['type'], - req_attrs) - - # Iterate through the requested attributes names/IDs values pair and - # compare with those currently set. If different, attempt to change. - Model_Handle = cur_attrs.pop("Model_Handle") - for attr in self.module.params['attributes']: - req_name = attr['name'] - req_val = attr['value'] - if req_val == "": - # The API will return None on empty string - req_val = None - if cur_attrs[req_name] != req_val: - if self.module.check_mode: - self.result['changed_attrs'][req_name] = req_val - self.result['msg'] = self.success_msg - self.result['changed'] = True - continue - resp = self.update_model(Model_Handle, {req_name: req_val}) - - self.module.exit_json(**self.result) - - -def run_module(): - argument_spec = dict( - url=dict(type='str', required=True), - url_username=dict(type='str', required=True, aliases=['username']), - url_password=dict(type='str', required=True, aliases=['password'], - no_log=True), - validate_certs=dict(type='bool', default=True), - use_proxy=dict(type='bool', default=True), - name=dict(type='str', required=True), - type=dict(type='str', required=True), - attributes=dict(type='list', - required=True, - elements='dict', - options=dict( - name=dict(type='str', required=True), - value=dict(type='str', required=True) - )), - ) - module = AnsibleModule( - supports_check_mode=True, - argument_spec=argument_spec, - ) - - try: - sm = spectrum_model_attrs(module) - sm.ensure_model_attrs() - except Exception as e: - module.fail_json(msg="Failed to ensure attribute(s) on `%s' with " - "exception: %s" % (module.params['name'], - to_native(e))) - - -def main(): - run_module() - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py b/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py deleted file mode 100644 index fa6bacb9..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: stackdriver -short_description: Send code deploy and annotation events to stackdriver -description: - - Send code deploy and annotation events to Stackdriver -author: "Ben Whaley (@bwhaley)" -options: - key: - type: str - description: - - API key. - required: true - event: - type: str - description: - - The type of event to send, either annotation or deploy - choices: ['annotation', 'deploy'] - required: true - revision_id: - type: str - description: - - The revision of the code that was deployed. Required for deploy events - deployed_by: - type: str - description: - - The person or robot responsible for deploying the code - default: "Ansible" - deployed_to: - type: str - description: - - "The environment code was deployed to. 
(ie: development, staging, production)" - repository: - type: str - description: - - The repository (or project) deployed - msg: - type: str - description: - - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation. - annotated_by: - type: str - description: - - The person or robot who the annotation should be attributed to. - default: "Ansible" - level: - type: str - description: - - one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display. - choices: ['INFO', 'WARN', 'ERROR'] - default: 'INFO' - instance_id: - type: str - description: - - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown - event_epoch: - type: str - description: - - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this." -''' - -EXAMPLES = ''' -- name: Send a code deploy event to stackdriver - community.general.stackdriver: - key: AAAAAA - event: deploy - deployed_to: production - deployed_by: leeroyjenkins - repository: MyWebApp - revision_id: abcd123 - -- name: Send an annotation event to stackdriver - community.general.stackdriver: - key: AAAAAA - event: annotation - msg: Greetings from Ansible - annotated_by: leeroyjenkins - level: WARN - instance_id: i-abcd1234 -''' - -# =========================================== -# Stackdriver module specific support methods. -# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): - """Send a deploy event to Stackdriver""" - deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent" - - params = {} - params['revision_id'] = revision_id - params['deployed_by'] = deployed_by - if deployed_to: - params['deployed_to'] = deployed_to - if repository: - params['repository'] = repository - - return do_send_request(module, deploy_api, params, key) - - -def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None): - """Send an annotation event to Stackdriver""" - annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent" - - params = {} - params['message'] = msg - if annotated_by: - params['annotated_by'] = annotated_by - if level: - params['level'] = level - if instance_id: - params['instance_id'] = instance_id - if event_epoch: - params['event_epoch'] = event_epoch - - return do_send_request(module, annotation_api, params, key) - - -def do_send_request(module, url, params, key): - data = json.dumps(params) - headers = { - 'Content-Type': 'application/json', - 'x-stackdriver-apikey': key - } - response, info = fetch_url(module, url, headers=headers, data=data, method='POST') - if info['status'] != 200: - module.fail_json(msg="Unable to send msg: %s" % info['msg']) - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( # @TODO add types - key=dict(required=True, no_log=True), - event=dict(required=True, choices=['deploy', 'annotation']), - msg=dict(), - revision_id=dict(), - annotated_by=dict(default='Ansible'), - level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']), - instance_id=dict(), - event_epoch=dict(), # @TODO int? 
-            deployed_by=dict(default='Ansible'),
-            deployed_to=dict(),
-            repository=dict(),
-        ),
-        supports_check_mode=True
-    )
-
-    key = module.params["key"]
-    event = module.params["event"]
-
-    # Annotation params
-    msg = module.params["msg"]
-    annotated_by = module.params["annotated_by"]
-    level = module.params["level"]
-    instance_id = module.params["instance_id"]
-    event_epoch = module.params["event_epoch"]
-
-    # Deploy params
-    revision_id = module.params["revision_id"]
-    deployed_by = module.params["deployed_by"]
-    deployed_to = module.params["deployed_to"]
-    repository = module.params["repository"]
-
-    ##################################################################
-    # deploy requires revision_id
-    # annotation requires msg
-    # We verify these manually
-    ##################################################################
-
-    if event == 'deploy':
-        if not revision_id:
-            module.fail_json(msg="revision_id required for deploy events")
-        try:
-            send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
-        except Exception as e:
-            module.fail_json(msg="unable to send deploy event: %s" % to_native(e),
-                             exception=traceback.format_exc())
-
-    if event == 'annotation':
-        if not msg:
-            module.fail_json(msg="msg required for annotation events")
-        try:
-            send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
-        except Exception as e:
-            module.fail_json(msg="unable to send annotation event: %s" % to_native(e),
-                             exception=traceback.format_exc())
-
-    changed = True
-    module.exit_json(changed=changed, deployed_by=deployed_by)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/monitoring/statsd.py b/ansible_collections/community/general/plugins/modules/monitoring/statsd.py
deleted file mode 100644
index b0785164..00000000
--- a/ansible_collections/community/general/plugins/modules/monitoring/statsd.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: statsd
-short_description: Send metrics to StatsD
-version_added: 2.1.0
-description:
-  - The C(statsd) module sends metrics to StatsD.
-  - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/).
-  - Supported metric types are C(counter) and C(gauge).
-    Currently unsupported metric types are C(timer), C(set), and C(gaugedelta).
-author: "Mark Mercado (@mamercad)"
-requirements:
-  - statsd
-options:
-  state:
-    type: str
-    description:
-      - State of the check, only C(present) makes sense.
-    choices: ["present"]
-    default: present
-  host:
-    type: str
-    default: localhost
-    description:
-      - StatsD host (hostname or IP) to send metrics to.
-  port:
-    type: int
-    default: 8125
-    description:
-      - The port on C(host) which StatsD is listening on.
-  protocol:
-    type: str
-    default: udp
-    choices: ["udp", "tcp"]
-    description:
-      - The transport protocol to send metrics over.
-  timeout:
-    type: float
-    default: 1.0
-    description:
-      - Sender timeout, only applicable if C(protocol) is C(tcp).
-  metric:
-    type: str
-    required: true
-    description:
-      - The name of the metric.
-  metric_type:
-    type: str
-    required: true
-    choices: ["counter", "gauge"]
-    description:
-      - The type of metric.
-  metric_prefix:
-    type: str
-    description:
-      - The prefix to add to the metric.
- value: - type: int - required: true - description: - - The value of the metric. - delta: - type: bool - default: false - description: - - If the metric is of type C(gauge), change the value by C(delta). -''' - -EXAMPLES = ''' -- name: Increment the metric my_counter by 1 - community.general.statsd: - host: localhost - port: 9125 - protocol: tcp - metric: my_counter - metric_type: counter - value: 1 - -- name: Set the gauge my_gauge to 7 - community.general.statsd: - host: localhost - port: 9125 - protocol: tcp - metric: my_gauge - metric_type: gauge - value: 7 -''' - - -from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) - -try: - from statsd import StatsClient, TCPStatsClient - HAS_STATSD = True -except ImportError: - HAS_STATSD = False - - -def udp_statsd_client(**client_params): - return StatsClient(**client_params) - - -def tcp_statsd_client(**client_params): - return TCPStatsClient(**client_params) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['present']), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=8125), - protocol=dict(type='str', default='udp', choices=['udp', 'tcp']), - timeout=dict(type='float', default=1.0), - metric=dict(type='str', required=True), - metric_type=dict(type='str', required=True, choices=['counter', 'gauge']), - metric_prefix=dict(type='str', default=''), - value=dict(type='int', required=True), - delta=dict(type='bool', default=False), - ), - supports_check_mode=False - ) - - if not HAS_STATSD: - module.fail_json(msg=missing_required_lib('statsd')) - - host = module.params.get('host') - port = module.params.get('port') - protocol = module.params.get('protocol') - timeout = module.params.get('timeout') - metric = module.params.get('metric') - metric_type = module.params.get('metric_type') - metric_prefix = module.params.get('metric_prefix') - value = module.params.get('value') - delta = module.params.get('delta') - - if protocol == 'udp': - client = udp_statsd_client(host=host, port=port, prefix=metric_prefix, maxudpsize=512, ipv6=False) - elif protocol == 'tcp': - client = tcp_statsd_client(host=host, port=port, timeout=timeout, prefix=metric_prefix, ipv6=False) - - metric_name = '%s/%s' % (metric_prefix, metric) if metric_prefix else metric - metric_display_value = '%s (delta=%s)' % (value, delta) if metric_type == 'gauge' else value - - try: - if metric_type == 'counter': - client.incr(metric, value) - elif metric_type == 'gauge': - client.gauge(metric, value, delta=delta) - - except Exception as exc: - module.fail_json(msg='Failed sending to StatsD %s' % str(exc)) - - finally: - if protocol == 'tcp': - client.close() - - module.exit_json(msg="Sent %s %s -> %s to StatsD" % (metric_type, metric_name, str(metric_display_value)), changed=True) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py b/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py deleted file mode 100644 index 10f733d4..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py +++ /dev/null @@ -1,467 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Benjamin Copeland (@bhcopeland) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - 
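# A minimal stand-alone sketch of the statsd library calls the statsd module
# above wraps; host, port, prefix, and metric names are assumed example values:
#
#     from statsd import StatsClient
#
#     client = StatsClient(host='localhost', port=8125, prefix='myapp',
#                          maxudpsize=512, ipv6=False)
#     client.incr('my_counter', 1)               # metric_type: counter
#     client.gauge('my_gauge', 7, delta=False)   # metric_type: gauge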
-DOCUMENTATION = '''
-
-module: statusio_maintenance
-short_description: Create maintenance windows for your status.io dashboard
-description:
-  - Creates a maintenance window for status.io
-  - Deletes a maintenance window for status.io
-notes:
-  - You can use the apiary API url (http://docs.statusio.apiary.io/) to
-    capture API traffic
-  - Use start_date and start_time with minutes to schedule a future
-    maintenance window
-author: Benjamin Copeland (@bhcopeland)
-options:
-    title:
-        type: str
-        description:
-            - A descriptive title for the maintenance window
-        default: "A new maintenance window"
-    desc:
-        type: str
-        description:
-            - Message describing the maintenance window
-        default: "Created by Ansible"
-    state:
-        type: str
-        description:
-            - Desired state of the maintenance window.
-        default: "present"
-        choices: ["present", "absent"]
-    api_id:
-        type: str
-        description:
-            - Your unique API ID from status.io
-        required: true
-    api_key:
-        type: str
-        description:
-            - Your unique API Key from status.io
-        required: true
-    statuspage:
-        type: str
-        description:
-            - Your unique StatusPage ID from status.io
-        required: true
-    url:
-        type: str
-        description:
-            - Status.io API URL. A private apiary can be used instead.
-        default: "https://api.status.io"
-    components:
-        type: list
-        elements: str
-        description:
-            - The given name of your component (server name)
-        aliases: ['component']
-    containers:
-        type: list
-        elements: str
-        description:
-            - The given name of your container (data center)
-        aliases: ['container']
-    all_infrastructure_affected:
-        description:
-            - If it affects all components and containers
-        type: bool
-        default: 'no'
-    automation:
-        description:
-            - Automatically start and end the maintenance window
-        type: bool
-        default: 'no'
-    maintenance_notify_now:
-        description:
-            - Notify subscribers now
-        type: bool
-        default: 'no'
-    maintenance_notify_72_hr:
-        description:
-            - Notify subscribers 72 hours before maintenance start time
-        type: bool
-        default: 'no'
-    maintenance_notify_24_hr:
-        description:
-            - Notify subscribers 24 hours before maintenance start time
-        type: bool
-        default: 'no'
-    maintenance_notify_1_hr:
-        description:
-            - Notify subscribers 1 hour before maintenance start time
-        type: bool
-        default: 'no'
-    maintenance_id:
-        type: str
-        description:
-            - The maintenance id number when deleting a maintenance window
-    minutes:
-        type: int
-        description:
-            - The length of time in UTC that the maintenance will run
-              (starting from playbook runtime)
-        default: 10
-    start_date:
-        type: str
-        description:
-            - Date maintenance is expected to start (Month/Day/Year) (UTC)
-            - End Date is worked out from start_date + minutes
-    start_time:
-        type: str
-        description:
-            - Time maintenance is expected to start (Hour:Minutes) (UTC)
-            - End Time is worked out from start_time + minutes
-'''
-
-EXAMPLES = '''
-- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
-  community.general.statusio_maintenance:
-    title: Router Upgrade from ansible
-    desc: Performing a Router Upgrade
-    components: server1.example.com
-    api_id: api_id
-    api_key: api_key
-    statuspage: statuspage_id
-    maintenance_notify_1_hr: True
-    automation: True
-
-- name: Create a maintenance window for 60 minutes on server1 and server2
-  community.general.statusio_maintenance:
-    title: Routine maintenance
-    desc: Some security updates
-    components:
-      - server1.example.com
-      - server2.example.com
-    minutes: 60
-    api_id: api_id
-    api_key: api_key
-    statuspage: statuspage_id
-    maintenance_notify_1_hr: True
-    automation: True
-  delegate_to: localhost
-
-- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center
-  community.general.statusio_maintenance:
-    title: Data center downtime
-    desc: Performing an upgrade to our data center
-    components: Primary Data Center
-    api_id: api_id
-    api_key: api_key
-    statuspage: statuspage_id
-    start_date: 01/01/2016
-    start_time: 12:00
-    minutes: 1440
-
-- name: Delete a maintenance window
-  community.general.statusio_maintenance:
-    title: Remove a maintenance window
-    maintenance_id: 561f90faf74bc94a4700087b
-    statuspage: statuspage_id
-    api_id: api_id
-    api_key: api_key
-    state: absent
-
-'''
-# TODO: Add RETURN documentation.
-RETURN = ''' # '''
-
-import datetime
-import json
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.urls import open_url
-
-
-def get_api_auth_headers(api_id, api_key, url, statuspage):
-
-    headers = {
-        "x-api-id": api_id,
-        "x-api-key": api_key,
-        "Content-Type": "application/json"
-    }
-
-    try:
-        response = open_url(
-            url + "/v2/component/list/" + statuspage, headers=headers)
-        data = json.loads(response.read())
-        if data['status']['message'] == 'Authentication failed':
-            return 1, None, None, "Authentication failed: " \
-                                  "Check api_id/api_key and statuspage id."
-        else:
-            auth_headers = headers
-            auth_content = data
-    except Exception as e:
-        return 1, None, None, to_native(e)
-    return 0, auth_headers, auth_content, None
-
-
-def get_component_ids(auth_content, components):
-    host_ids = []
-    lower_components = [x.lower() for x in components]
-    for result in auth_content["result"]:
-        if result['name'].lower() in lower_components:
-            data = {
-                "component_id": result["_id"],
-                "container_id": result["containers"][0]["_id"]
-            }
-            host_ids.append(data)
-            lower_components.remove(result['name'].lower())
-    if len(lower_components):
-        # items not found in the API
-        return 1, None, lower_components
-    return 0, host_ids, None
-
-
-def get_container_ids(auth_content, containers):
-    host_ids = []
-    lower_containers = [x.lower() for x in containers]
-    for result in auth_content["result"]:
-        if result["containers"][0]["name"].lower() in lower_containers:
-            data = {
-                "component_id": result["_id"],
-                "container_id": result["containers"][0]["_id"]
-            }
-            host_ids.append(data)
-            lower_containers.remove(result["containers"][0]["name"].lower())
-
-    if len(lower_containers):
-        # items not found in the API
-        return 1, None, lower_containers
-    return 0, host_ids, None
-
-
-def get_date_time(start_date, start_time, minutes):
-    returned_date = []
-    if start_date and start_time:
-        try:
-            datetime.datetime.strptime(start_date, '%m/%d/%Y')
-            returned_date.append(start_date)
-        except (NameError, ValueError):
-            return 1, None, "Not a valid start_date format."
-        try:
-            datetime.datetime.strptime(start_time, '%H:%M')
-            returned_date.append(start_time)
-        except (NameError, ValueError):
-            return 1, None, "Not a valid start_time format."
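        # A worked example of the arithmetic below, using the inputs from the
        # EXAMPLES block above (start_date='01/01/2016', start_time='12:00',
        # minutes=1440): returned_date ends up as
        #     ['01/01/2016', '12:00', '01/02/2016', '12:00']
        # i.e. [start date, start time, end date, end time].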
- try: - # Work out end date/time based on minutes - date_time_start = datetime.datetime.strptime( - start_time + start_date, '%H:%M%m/%d/%Y') - delta = date_time_start + datetime.timedelta(minutes=minutes) - returned_date.append(delta.strftime("%m/%d/%Y")) - returned_date.append(delta.strftime("%H:%M")) - except (NameError, ValueError): - return 1, None, "Couldn't work out a valid date" - else: - now = datetime.datetime.utcnow() - delta = now + datetime.timedelta(minutes=minutes) - # start_date - returned_date.append(now.strftime("%m/%d/%Y")) - returned_date.append(now.strftime("%H:%M")) - # end_date - returned_date.append(delta.strftime("%m/%d/%Y")) - returned_date.append(delta.strftime("%H:%M")) - return 0, returned_date, None - - -def create_maintenance(auth_headers, url, statuspage, host_ids, - all_infrastructure_affected, automation, title, desc, - returned_date, maintenance_notify_now, - maintenance_notify_72_hr, maintenance_notify_24_hr, - maintenance_notify_1_hr): - returned_dates = [[x] for x in returned_date] - component_id = [] - container_id = [] - for val in host_ids: - component_id.append(val['component_id']) - container_id.append(val['container_id']) - try: - values = json.dumps({ - "statuspage_id": statuspage, - "components": component_id, - "containers": container_id, - "all_infrastructure_affected": str(int(all_infrastructure_affected)), - "automation": str(int(automation)), - "maintenance_name": title, - "maintenance_details": desc, - "date_planned_start": returned_dates[0], - "time_planned_start": returned_dates[1], - "date_planned_end": returned_dates[2], - "time_planned_end": returned_dates[3], - "maintenance_notify_now": str(int(maintenance_notify_now)), - "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), - "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), - "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr)) - }) - response = open_url( - url + "/v2/maintenance/schedule", data=values, - headers=auth_headers) - data = json.loads(response.read()) - - if data["status"]["error"] == "yes": - return 1, None, data["status"]["message"] - except Exception as e: - return 1, None, to_native(e) - return 0, None, None - - -def delete_maintenance(auth_headers, url, statuspage, maintenance_id): - try: - values = json.dumps({ - "statuspage_id": statuspage, - "maintenance_id": maintenance_id, - }) - response = open_url( - url=url + "/v2/maintenance/delete", - data=values, - headers=auth_headers) - data = json.loads(response.read()) - if data["status"]["error"] == "yes": - return 1, None, "Invalid maintenance_id" - except Exception as e: - return 1, None, to_native(e) - return 0, None, None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_id=dict(required=True), - api_key=dict(required=True, no_log=True), - statuspage=dict(required=True), - state=dict(required=False, default='present', - choices=['present', 'absent']), - url=dict(default='https://api.status.io', required=False), - components=dict(type='list', elements='str', required=False, default=None, - aliases=['component']), - containers=dict(type='list', elements='str', required=False, default=None, - aliases=['container']), - all_infrastructure_affected=dict(type='bool', default=False, - required=False), - automation=dict(type='bool', default=False, required=False), - title=dict(required=False, default='A new maintenance window'), - desc=dict(required=False, default='Created by Ansible'), - minutes=dict(type='int', required=False, default=10), - 
maintenance_notify_now=dict(type='bool', default=False, - required=False), - maintenance_notify_72_hr=dict(type='bool', default=False, - required=False), - maintenance_notify_24_hr=dict(type='bool', default=False, - required=False), - maintenance_notify_1_hr=dict(type='bool', default=False, - required=False), - maintenance_id=dict(required=False, default=None), - start_date=dict(default=None, required=False), - start_time=dict(default=None, required=False) - ), - supports_check_mode=True, - ) - - api_id = module.params['api_id'] - api_key = module.params['api_key'] - statuspage = module.params['statuspage'] - state = module.params['state'] - url = module.params['url'] - components = module.params['components'] - containers = module.params['containers'] - all_infrastructure_affected = module.params['all_infrastructure_affected'] - automation = module.params['automation'] - title = module.params['title'] - desc = module.params['desc'] - minutes = module.params['minutes'] - maintenance_notify_now = module.params['maintenance_notify_now'] - maintenance_notify_72_hr = module.params['maintenance_notify_72_hr'] - maintenance_notify_24_hr = module.params['maintenance_notify_24_hr'] - maintenance_notify_1_hr = module.params['maintenance_notify_1_hr'] - maintenance_id = module.params['maintenance_id'] - start_date = module.params['start_date'] - start_time = module.params['start_time'] - - if state == "present": - - if api_id and api_key: - (rc, auth_headers, auth_content, error) = \ - get_api_auth_headers(api_id, api_key, url, statuspage) - if rc != 0: - module.fail_json(msg="Failed to get auth keys: %s" % error) - else: - auth_headers = {} - auth_content = {} - - if minutes or start_time and start_date: - (rc, returned_date, error) = get_date_time( - start_date, start_time, minutes) - if rc != 0: - module.fail_json(msg="Failed to set date/time: %s" % error) - - if not components and not containers: - return module.fail_json(msg="A Component or Container must be " - "defined") - elif components and containers: - return module.fail_json(msg="Components and containers cannot " - "be used together") - else: - if components: - (rc, host_ids, error) = get_component_ids(auth_content, - components) - if rc != 0: - module.fail_json(msg="Failed to find component %s" % error) - - if containers: - (rc, host_ids, error) = get_container_ids(auth_content, - containers) - if rc != 0: - module.fail_json(msg="Failed to find container %s" % error) - - if module.check_mode: - module.exit_json(changed=True) - else: - (rc, dummy, error) = create_maintenance( - auth_headers, url, statuspage, host_ids, - all_infrastructure_affected, automation, - title, desc, returned_date, maintenance_notify_now, - maintenance_notify_72_hr, maintenance_notify_24_hr, - maintenance_notify_1_hr) - if rc == 0: - module.exit_json(changed=True, result="Successfully created " - "maintenance") - else: - module.fail_json(msg="Failed to create maintenance: %s" - % error) - - if state == "absent": - - if api_id and api_key: - (rc, auth_headers, auth_content, error) = \ - get_api_auth_headers(api_id, api_key, url, statuspage) - if rc != 0: - module.fail_json(msg="Failed to get auth keys: %s" % error) - else: - auth_headers = {} - - if module.check_mode: - module.exit_json(changed=True) - else: - (rc, dummy, error) = delete_maintenance( - auth_headers, url, statuspage, maintenance_id) - if rc == 0: - module.exit_json( - changed=True, - result="Successfully deleted maintenance" - ) - else: - module.fail_json( - msg="Failed to delete maintenance: %s" 
% error) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py b/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py deleted file mode 100644 index 833a7f19..00000000 --- a/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: uptimerobot -short_description: Pause and start Uptime Robot monitoring -description: - - This module will let you start and pause Uptime Robot Monitoring -author: "Nate Kingsley (@nate-kingsley)" -requirements: - - Valid Uptime Robot API Key -options: - state: - type: str - description: - - Define whether or not the monitor should be running or paused. - required: true - choices: [ "started", "paused" ] - monitorid: - type: str - description: - - ID of the monitor to check. - required: true - apikey: - type: str - description: - - Uptime Robot API key. - required: true -notes: - - Support for adding and removing monitors and alert contacts has not yet been implemented. -''' - -EXAMPLES = ''' -- name: Pause the monitor with an ID of 12345 - community.general.uptimerobot: - monitorid: 12345 - apikey: 12345-1234512345 - state: paused - -- name: Start the monitor with an ID of 12345 - community.general.uptimerobot: - monitorid: 12345 - apikey: 12345-1234512345 - state: started -''' - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_text - - -API_BASE = "https://api.uptimerobot.com/" - -API_ACTIONS = dict( - status='getMonitors?', - editMonitor='editMonitor?' 
-) - -API_FORMAT = 'json' -API_NOJSONCALLBACK = 1 -CHANGED_STATE = False -SUPPORTS_CHECK_MODE = False - - -def checkID(module, params): - - data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['status'] + data - req, info = fetch_url(module, full_uri) - result = to_text(req.read()) - jsonresult = json.loads(result) - req.close() - return jsonresult - - -def startMonitor(module, params): - - params['monitorStatus'] = 1 - data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['editMonitor'] + data - req, info = fetch_url(module, full_uri) - result = to_text(req.read()) - jsonresult = json.loads(result) - req.close() - return jsonresult['stat'] - - -def pauseMonitor(module, params): - - params['monitorStatus'] = 0 - data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['editMonitor'] + data - req, info = fetch_url(module, full_uri) - result = to_text(req.read()) - jsonresult = json.loads(result) - req.close() - return jsonresult['stat'] - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['started', 'paused']), - apikey=dict(required=True, no_log=True), - monitorid=dict(required=True) - ), - supports_check_mode=SUPPORTS_CHECK_MODE - ) - - params = dict( - apiKey=module.params['apikey'], - monitors=module.params['monitorid'], - monitorID=module.params['monitorid'], - format=API_FORMAT, - noJsonCallback=API_NOJSONCALLBACK - ) - - check_result = checkID(module, params) - - if check_result['stat'] != "ok": - module.fail_json( - msg="failed", - result=check_result['message'] - ) - - if module.params['state'] == 'started': - monitor_result = startMonitor(module, params) - else: - monitor_result = pauseMonitor(module, params) - - module.exit_json( - msg="success", - result=monitor_result - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/mqtt.py b/ansible_collections/community/general/plugins/modules/mqtt.py deleted file mode 120000 index ff68a256..00000000 --- a/ansible_collections/community/general/plugins/modules/mqtt.py +++ /dev/null @@ -1 +0,0 @@ -notification/mqtt.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/mssql_db.py b/ansible_collections/community/general/plugins/modules/mssql_db.py deleted file mode 120000 index cc1927b6..00000000 --- a/ansible_collections/community/general/plugins/modules/mssql_db.py +++ /dev/null @@ -1 +0,0 @@ -database/mssql/mssql_db.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/mssql_script.py b/ansible_collections/community/general/plugins/modules/mssql_script.py deleted file mode 120000 index 78f004fc..00000000 --- a/ansible_collections/community/general/plugins/modules/mssql_script.py +++ /dev/null @@ -1 +0,0 @@ -database/mssql/mssql_script.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/nagios.py b/ansible_collections/community/general/plugins/modules/nagios.py deleted file mode 120000 index e088e06d..00000000 --- a/ansible_collections/community/general/plugins/modules/nagios.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/nagios.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py b/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py deleted file mode 100644 index 4e82e0af..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py +++ /dev/null 
@@ -1,884 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016 Michael Gruener -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: cloudflare_dns -author: -- Michael Gruener (@mgruener) -requirements: - - python >= 2.6 -short_description: Manage Cloudflare DNS records -description: - - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)." -options: - api_token: - description: - - API token. - - Required for api token authentication. - - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." - - Can be specified in C(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. - type: str - required: false - version_added: '0.2.0' - account_api_key: - description: - - Account API key. - - Required for api keys authentication. - - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." - type: str - required: false - aliases: [ account_api_token ] - account_email: - description: - - Account email. Required for API keys authentication. - type: str - required: false - algorithm: - description: - - Algorithm number. - - Required for C(type=DS) and C(type=SSHFP) when C(state=present). - type: int - cert_usage: - description: - - Certificate usage number. - - Required for C(type=TLSA) when C(state=present). - type: int - choices: [ 0, 1, 2, 3 ] - hash_type: - description: - - Hash type number. - - Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present). - type: int - choices: [ 1, 2 ] - key_tag: - description: - - DNSSEC key tag. - - Needed for C(type=DS) when C(state=present). - type: int - port: - description: - - Service port. - - Required for C(type=SRV) and C(type=TLSA). - type: int - priority: - description: - - Record priority. - - Required for C(type=MX) and C(type=SRV) - default: 1 - type: int - proto: - description: - - Service protocol. Required for C(type=SRV) and C(type=TLSA). - - Common values are TCP and UDP. - - Before Ansible 2.6 only TCP and UDP were available. - type: str - proxied: - description: - - Proxy through Cloudflare network or just use DNS. - type: bool - default: no - record: - description: - - Record to add. - - Required if C(state=present). - - Default is C(@) (e.g. the zone name). - type: str - default: '@' - aliases: [ name ] - selector: - description: - - Selector number. - - Required for C(type=TLSA) when C(state=present). - choices: [ 0, 1 ] - type: int - service: - description: - - Record service. - - Required for I(type=SRV). - type: str - solo: - description: - - Whether the record should be the only one for that record type and record name. - - Only use with C(state=present). - - This will delete all other records with the same record name and type. - type: bool - state: - description: - - Whether the record(s) should exist or not. - type: str - choices: [ absent, present ] - default: present - timeout: - description: - - Timeout for Cloudflare API calls. - type: int - default: 30 - ttl: - description: - - The TTL to give the new record. - - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic. - type: int - default: 1 - type: - description: - - The type of DNS record to create. Required if C(state=present). 
- - C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7. - type: str - choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ] - value: - description: - - The record value. - - Required for C(state=present). - type: str - aliases: [ content ] - weight: - description: - - Service weight. - - Required for C(type=SRV). - type: int - default: 1 - zone: - description: - - The name of the Zone to work with (e.g. "example.com"). - - The Zone must already exist. - type: str - required: true - aliases: [ domain ] -''' - -EXAMPLES = r''' -- name: Create a test.example.net A record to point to 127.0.0.1 - community.general.cloudflare_dns: - zone: example.net - record: test - type: A - value: 127.0.0.1 - account_email: test@example.com - account_api_key: dummyapitoken - register: record - -- name: Create a record using api token - community.general.cloudflare_dns: - zone: example.net - record: test - type: A - value: 127.0.0.1 - api_token: dummyapitoken - -- name: Create a example.net CNAME record to example.com - community.general.cloudflare_dns: - zone: example.net - type: CNAME - value: example.com - account_email: test@example.com - account_api_key: dummyapitoken - state: present - -- name: Change its TTL - community.general.cloudflare_dns: - zone: example.net - type: CNAME - value: example.com - ttl: 600 - account_email: test@example.com - account_api_key: dummyapitoken - state: present - -- name: Delete the record - community.general.cloudflare_dns: - zone: example.net - type: CNAME - value: example.com - account_email: test@example.com - account_api_key: dummyapitoken - state: absent - -- name: Create a example.net CNAME record to example.com and proxy through Cloudflare's network - community.general.cloudflare_dns: - zone: example.net - type: CNAME - value: example.com - proxied: yes - account_email: test@example.com - account_api_key: dummyapitoken - state: present - -# This deletes all other TXT records named "test.example.net" -- name: Create TXT record "test.example.net" with value "unique value" - community.general.cloudflare_dns: - domain: example.net - record: test - type: TXT - value: unique value - solo: true - account_email: test@example.com - account_api_key: dummyapitoken - state: present - -- name: Create an SRV record _foo._tcp.example.net - community.general.cloudflare_dns: - domain: example.net - service: foo - proto: tcp - port: 3500 - priority: 10 - weight: 20 - type: SRV - value: fooserver.example.net - -- name: Create a SSHFP record login.example.com - community.general.cloudflare_dns: - zone: example.com - record: login - type: SSHFP - algorithm: 4 - hash_type: 2 - value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1 - -- name: Create a TLSA record _25._tcp.mail.example.com - community.general.cloudflare_dns: - zone: example.com - record: mail - port: 25 - proto: tcp - type: TLSA - cert_usage: 3 - selector: 1 - hash_type: 1 - value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3 - -- name: Create a DS record for subdomain.example.com - community.general.cloudflare_dns: - zone: example.com - record: subdomain - type: DS - key_tag: 5464 - algorithm: 8 - hash_type: 2 - value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB -''' - -RETURN = r''' -record: - description: A dictionary containing the record data. - returned: success, except on record deletion - type: complex - contains: - content: - description: The record content (details depend on record type). 
- returned: success - type: str - sample: 192.0.2.91 - created_on: - description: The record creation date. - returned: success - type: str - sample: "2016-03-25T19:09:42.516553Z" - data: - description: Additional record data. - returned: success, if type is SRV, DS, SSHFP or TLSA - type: dict - sample: { - name: "jabber", - port: 8080, - priority: 10, - proto: "_tcp", - service: "_xmpp", - target: "jabberhost.sample.com", - weight: 5, - } - id: - description: The record ID. - returned: success - type: str - sample: f9efb0549e96abcb750de63b38c9576e - locked: - description: No documentation available. - returned: success - type: bool - sample: False - meta: - description: No documentation available. - returned: success - type: dict - sample: { auto_added: false } - modified_on: - description: Record modification date. - returned: success - type: str - sample: "2016-03-25T19:09:42.516553Z" - name: - description: The record name as FQDN (including _service and _proto for SRV). - returned: success - type: str - sample: www.sample.com - priority: - description: Priority of the MX record. - returned: success, if type is MX - type: int - sample: 10 - proxiable: - description: Whether this record can be proxied through Cloudflare. - returned: success - type: bool - sample: False - proxied: - description: Whether the record is proxied through Cloudflare. - returned: success - type: bool - sample: False - ttl: - description: The time-to-live for the record. - returned: success - type: int - sample: 300 - type: - description: The record type. - returned: success - type: str - sample: A - zone_id: - description: The ID of the zone containing the record. - returned: success - type: str - sample: abcede0bf9f0066f94029d2e6b73856a - zone_name: - description: The name of the zone containing the record. 
- returned: success - type: str - sample: sample.com -''' - -import json - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.urls import fetch_url - - -def lowercase_string(param): - if not isinstance(param, str): - return param - return param.lower() - - -class CloudflareAPI(object): - - cf_api_endpoint = 'https://api.cloudflare.com/client/v4' - changed = False - - def __init__(self, module): - self.module = module - self.api_token = module.params['api_token'] - self.account_api_key = module.params['account_api_key'] - self.account_email = module.params['account_email'] - self.algorithm = module.params['algorithm'] - self.cert_usage = module.params['cert_usage'] - self.hash_type = module.params['hash_type'] - self.key_tag = module.params['key_tag'] - self.port = module.params['port'] - self.priority = module.params['priority'] - self.proto = lowercase_string(module.params['proto']) - self.proxied = module.params['proxied'] - self.selector = module.params['selector'] - self.record = lowercase_string(module.params['record']) - self.service = lowercase_string(module.params['service']) - self.is_solo = module.params['solo'] - self.state = module.params['state'] - self.timeout = module.params['timeout'] - self.ttl = module.params['ttl'] - self.type = module.params['type'] - self.value = module.params['value'] - self.weight = module.params['weight'] - self.zone = lowercase_string(module.params['zone']) - - if self.record == '@': - self.record = self.zone - - if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None): - self.value = self.value.rstrip('.').lower() - - if (self.type == 'AAAA') and (self.value is not None): - self.value = self.value.lower() - - if (self.type == 'SRV'): - if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = '_' + self.proto - if (self.service is not None) and (not self.service.startswith('_')): - self.service = '_' + self.service - - if (self.type == 'TLSA'): - if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = '_' + self.proto - if (self.port is not None): - self.port = '_' + str(self.port) - - if not self.record.endswith(self.zone): - self.record = self.record + '.' 
+ self.zone - - if (self.type == 'DS'): - if self.record == self.zone: - self.module.fail_json(msg="DS records only apply to subdomains.") - - def _cf_simple_api_call(self, api_call, method='GET', payload=None): - if self.api_token: - headers = { - 'Authorization': 'Bearer ' + self.api_token, - 'Content-Type': 'application/json', - } - else: - headers = { - 'X-Auth-Email': self.account_email, - 'X-Auth-Key': self.account_api_key, - 'Content-Type': 'application/json', - } - data = None - if payload: - try: - data = json.dumps(payload) - except Exception as e: - self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e)) - - resp, info = fetch_url(self.module, - self.cf_api_endpoint + api_call, - headers=headers, - data=data, - method=method, - timeout=self.timeout) - - if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]: - self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg'))) - - error_msg = '' - if info['status'] == 401: - # Unauthorized - error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) - elif info['status'] == 403: - # Forbidden - error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) - elif info['status'] == 429: - # Too many requests - error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) - elif info['status'] == 405: - # Method not allowed - error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) - elif info['status'] == 415: - # Unsupported Media Type - error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) - elif info['status'] == 400: - # Bad Request - error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) - - result = None - try: - content = resp.read() - except AttributeError: - if info['body']: - content = info['body'] - else: - error_msg += "; The API response was empty" - - if content: - try: - result = json.loads(to_text(content, errors='surrogate_or_strict')) - except (getattr(json, 'JSONDecodeError', ValueError)) as e: - error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content) - - # Without a valid/parsed JSON response no more error processing can be done - if result is None: - self.module.fail_json(msg=error_msg) - - if 'success' not in result: - error_msg += "; Unexpected error details: {0}".format(result.get('error')) - self.module.fail_json(msg=error_msg) - - if not result['success']: - error_msg += "; Error details: " - for error in result['errors']: - error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message']) - if 'error_chain' in error: - for chain_error in error['error_chain']: - error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message']) - self.module.fail_json(msg=error_msg) - - return result, info['status'] - - def _cf_api_call(self, api_call, method='GET', payload=None): - result, status = self._cf_simple_api_call(api_call, method, payload) - - data = result['result'] - - if 'result_info' in result: - pagination = result['result_info'] - if pagination['total_pages'] > 1: - next_page = int(pagination['page']) + 1 - parameters = ['page={0}'.format(next_page)] - # strip 
"page" parameter from call parameters (if there are any) - if '?' in api_call: - raw_api_call, query = api_call.split('?', 1) - parameters += [param for param in query.split('&') if not param.startswith('page')] - else: - raw_api_call = api_call - while next_page <= pagination['total_pages']: - raw_api_call += '?' + '&'.join(parameters) - result, status = self._cf_simple_api_call(raw_api_call, method, payload) - data += result['result'] - next_page += 1 - - return data, status - - def _get_zone_id(self, zone=None): - if not zone: - zone = self.zone - - zones = self.get_zones(zone) - if len(zones) > 1: - self.module.fail_json(msg="More than one zone matches {0}".format(zone)) - - if len(zones) < 1: - self.module.fail_json(msg="No zone found with name {0}".format(zone)) - - return zones[0]['id'] - - def get_zones(self, name=None): - if not name: - name = self.zone - param = '' - if name: - param = '?' + urlencode({'name': name}) - zones, status = self._cf_api_call('/zones' + param) - return zones - - def get_dns_records(self, zone_name=None, type=None, record=None, value=''): - if not zone_name: - zone_name = self.zone - if not type: - type = self.type - if not record: - record = self.record - # necessary because None as value means to override user - # set module value - if (not value) and (value is not None): - value = self.value - - zone_id = self._get_zone_id() - api_call = '/zones/{0}/dns_records'.format(zone_id) - query = {} - if type: - query['type'] = type - if record: - query['name'] = record - if value: - query['content'] = value - if query: - api_call += '?' + urlencode(query) - - records, status = self._cf_api_call(api_call) - return records - - def delete_dns_records(self, **kwargs): - params = {} - for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: - if param in kwargs: - params[param] = kwargs[param] - else: - params[param] = getattr(self, param) - - records = [] - content = params['value'] - search_record = params['record'] - if params['type'] == 'SRV': - if not (params['value'] is None or params['value'] == ''): - content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] - search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] - elif params['type'] == 'DS': - if not (params['value'] is None or params['value'] == ''): - content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - elif params['type'] == 'SSHFP': - if not (params['value'] is None or params['value'] == ''): - content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - elif params['type'] == 'TLSA': - if not (params['value'] is None or params['value'] == ''): - content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - search_record = params['port'] + '.' + params['proto'] + '.' 
+ params['record'] - if params['solo']: - search_value = None - else: - search_value = content - - records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) - - for rr in records: - if params['solo']: - if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)): - self.changed = True - if not self.module.check_mode: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') - else: - self.changed = True - if not self.module.check_mode: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') - return self.changed - - def ensure_dns_record(self, **kwargs): - params = {} - for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: - if param in kwargs: - params[param] = kwargs[param] - else: - params[param] = getattr(self, param) - - search_value = params['value'] - search_record = params['record'] - new_record = None - if (params['type'] is None) or (params['record'] is None): - self.module.fail_json(msg="You must provide a type and a record to create a new record") - - if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']): - if not params['value']: - self.module.fail_json(msg="You must provide a non-empty value to create this record type") - - # there can only be one CNAME per record - # ignoring the value when searching for existing - # CNAME records allows us to update the value if it - # changes - if params['type'] == 'CNAME': - search_value = None - - new_record = { - "type": params['type'], - "name": params['record'], - "content": params['value'], - "ttl": params['ttl'] - } - - if (params['type'] in ['A', 'AAAA', 'CNAME']): - new_record["proxied"] = params["proxied"] - - if params['type'] == 'MX': - for attr in [params['priority'], params['value']]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide priority and a value to create this record type") - new_record = { - "type": params['type'], - "name": params['record'], - "content": params['value'], - "priority": params['priority'], - "ttl": params['ttl'] - } - - if params['type'] == 'SRV': - for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") - srv_data = { - "target": params['value'], - "port": params['port'], - "weight": params['weight'], - "priority": params['priority'], - "name": params['record'][:-len('.' + params['zone'])], - "proto": params['proto'], - "service": params['service'] - } - new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data} - search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] - search_record = params['service'] + '.' + params['proto'] + '.' 
+ params['record'] - - if params['type'] == 'DS': - for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type") - ds_data = { - "key_tag": params['key_tag'], - "algorithm": params['algorithm'], - "digest_type": params['hash_type'], - "digest": params['value'], - } - new_record = { - "type": params['type'], - "name": params['record'], - 'data': ds_data, - "ttl": params['ttl'], - } - search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - - if params['type'] == 'SSHFP': - for attr in [params['algorithm'], params['hash_type'], params['value']]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type") - sshfp_data = { - "fingerprint": params['value'], - "type": params['hash_type'], - "algorithm": params['algorithm'], - } - new_record = { - "type": params['type'], - "name": params['record'], - 'data': sshfp_data, - "ttl": params['ttl'], - } - search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - - if params['type'] == 'TLSA': - for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type") - search_record = params['port'] + '.' + params['proto'] + '.' + params['record'] - tlsa_data = { - "usage": params['cert_usage'], - "selector": params['selector'], - "matching_type": params['hash_type'], - "certificate": params['value'], - } - new_record = { - "type": params['type'], - "name": search_record, - 'data': tlsa_data, - "ttl": params['ttl'], - } - search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - - zone_id = self._get_zone_id(params['zone']) - records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) - # in theory this should be impossible as Cloudflare does not allow - # the creation of duplicate records, but let's cover it anyway - if len(records) > 1: - self.module.fail_json(msg="More than one record already exists for the given attributes. 
That should be impossible, please open an issue!") - # record already exists, check if it must be updated - if len(records) == 1: - cur_record = records[0] - do_update = False - if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']): - do_update = True - if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']): - do_update = True - if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']): - do_update = True - if ('data' in new_record) and ('data' in cur_record): - if (cur_record['data'] != new_record['data']): - do_update = True - if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']): - do_update = True - if do_update: - if self.module.check_mode: - result = new_record - else: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record) - self.changed = True - return result, self.changed - else: - return records, self.changed - if self.module.check_mode: - result = new_record - else: - result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record) - self.changed = True - return result, self.changed - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_token=dict( - type="str", - required=False, - no_log=True, - fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]), - ), - account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']), - account_email=dict(type='str', required=False), - algorithm=dict(type='int'), - cert_usage=dict(type='int', choices=[0, 1, 2, 3]), - hash_type=dict(type='int', choices=[1, 2]), - key_tag=dict(type='int', no_log=False), - port=dict(type='int'), - priority=dict(type='int', default=1), - proto=dict(type='str'), - proxied=dict(type='bool', default=False), - record=dict(type='str', default='@', aliases=['name']), - selector=dict(type='int', choices=[0, 1]), - service=dict(type='str'), - solo=dict(type='bool'), - state=dict(type='str', default='present', choices=['absent', 'present']), - timeout=dict(type='int', default=30), - ttl=dict(type='int', default=1), - type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']), - value=dict(type='str', aliases=['content']), - weight=dict(type='int', default=1), - zone=dict(type='str', required=True, aliases=['domain']), - ), - supports_check_mode=True, - required_if=[ - ('state', 'present', ['record', 'type', 'value']), - ('state', 'absent', ['record']), - ('type', 'SRV', ['proto', 'service']), - ('type', 'TLSA', ['proto', 'port']), - ], - ) - - if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']): - module.fail_json(msg="Either api_token or account_api_key and account_email params are required.") - if module.params['type'] == 'SRV': - if not ((module.params['weight'] is not None and module.params['port'] is not None - and not (module.params['value'] is None or module.params['value'] == '')) - or (module.params['weight'] is None and module.params['port'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.") - - if module.params['type'] == 'SSHFP': - if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None - and not (module.params['value'] is None or 
module.params['value'] == '')) - or (module.params['algorithm'] is None and module.params['hash_type'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.") - - if module.params['type'] == 'TLSA': - if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None - and not (module.params['value'] is None or module.params['value'] == '')) - or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.") - - if module.params['type'] == 'DS': - if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None - and not (module.params['value'] is None or module.params['value'] == '')) - or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.") - - changed = False - cf_api = CloudflareAPI(module) - - # sanity checks - if cf_api.is_solo and cf_api.state == 'absent': - module.fail_json(msg="solo=true can only be used with state=present") - - # perform add, delete or update (only the TTL can be updated) of one or - # more records - if cf_api.state == 'present': - # delete all records matching record name + type - if cf_api.is_solo: - changed = cf_api.delete_dns_records(solo=cf_api.is_solo) - result, changed = cf_api.ensure_dns_record() - if isinstance(result, list): - module.exit_json(changed=changed, result={'record': result[0]}) - - module.exit_json(changed=changed, result={'record': result}) - else: - # force solo to False, just to be sure - changed = cf_api.delete_dns_records(solo=False) - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py b/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py deleted file mode 100644 index d6606cc0..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py +++ /dev/null @@ -1,517 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: dnsimple -short_description: Interface with dnsimple.com (a DNS hosting service) -description: - - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)." -options: - account_email: - description: - - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for. - - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)." 
- - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0" - type: str - account_api_token: - description: - - Account API token. See I(account_email) for more information. - type: str - domain: - description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. - - If omitted, a list of domains will be returned. - - If domain is present but the domain doesn't exist, it will be created. - type: str - record: - description: - - Record to add, if blank a record for the domain will be created, supports the wildcard (*). - type: str - record_ids: - description: - - List of records to ensure they either exist or do not exist. - type: list - elements: str - type: - description: - - The type of DNS record to create. - choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ] - type: str - ttl: - description: - - The TTL to give the new record in seconds. - default: 3600 - type: int - value: - description: - - Record value. - - Must be specified when trying to ensure a record exists. - type: str - priority: - description: - - Record priority. - type: int - state: - description: - - whether the record should exist or not. - choices: [ 'present', 'absent' ] - default: present - type: str - solo: - description: - - Whether the record should be the only one for that record type and record name. - - Only use with C(state) is set to C(present) on a record. - type: 'bool' - default: no - sandbox: - description: - - Use the DNSimple sandbox environment. - - Requires a dedicated account in the dnsimple sandbox environment. - - Check U(https://developer.dnsimple.com/sandbox/) for more information. - type: 'bool' - default: no - version_added: 3.5.0 -requirements: - - "dnsimple >= 1.0.0" -notes: - - "Support for C(dnsimple < 2) is deprecated and will be removed in community.general 5.0.0." 
-author: "Alex Coomans (@drcapulet)" -''' - -EXAMPLES = ''' -- name: Authenticate using email and API token and fetch all domains - community.general.dnsimple: - account_email: test@example.com - account_api_token: dummyapitoken - delegate_to: localhost - -- name: Delete a domain - community.general.dnsimple: - domain: my.com - state: absent - delegate_to: localhost - -- name: Create a test.my.com A record to point to 127.0.0.1 - community.general.dnsimple: - domain: my.com - record: test - type: A - value: 127.0.0.1 - delegate_to: localhost - register: record - -- name: Delete record using record_ids - community.general.dnsimple: - domain: my.com - record_ids: '{{ record["id"] }}' - state: absent - delegate_to: localhost - -- name: Create a my.com CNAME record to example.com - community.general.dnsimple: - domain: my.com - record: '' - type: CNAME - value: example.com - state: present - delegate_to: localhost - -- name: Change TTL value for a record - community.general.dnsimple: - domain: my.com - record: '' - type: CNAME - value: example.com - ttl: 600 - state: present - delegate_to: localhost - -- name: Delete the record - community.general.dnsimple: - domain: my.com - record: '' - type: CNAME - value: example.com - state: absent - delegate_to: localhost -''' - -RETURN = r"""# """ - -import traceback -import re - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -class DNSimpleV1(): - """class which uses dnsimple-python < 2""" - - def __init__(self, account_email, account_api_token, sandbox, module): - """init""" - self.module = module - self.account_email = account_email - self.account_api_token = account_api_token - self.sandbox = sandbox - self.dnsimple_client() - - def dnsimple_client(self): - """creates a dnsimple client object""" - if self.account_email and self.account_api_token: - self.client = DNSimple(sandbox=self.sandbox, email=self.account_email, api_token=self.account_api_token) - else: - self.client = DNSimple(sandbox=self.sandbox) - - def get_all_domains(self): - """returns a list of all domains""" - domain_list = self.client.domains() - return [d['domain'] for d in domain_list] - - def get_domain(self, domain): - """returns a single domain by name or id""" - try: - dr = self.client.domain(domain)['domain'] - except DNSimpleException as e: - exception_string = str(e.args[0]['message']) - if re.match(r"^Domain .+ not found$", exception_string): - dr = None - else: - raise - return dr - - def create_domain(self, domain): - """create a single domain""" - return self.client.add_domain(domain)['domain'] - - def delete_domain(self, domain): - """delete a single domain""" - self.client.delete(domain) - - def get_records(self, domain, dnsimple_filter=None): - """return dns ressource records which match a specified filter""" - return [r['record'] for r in self.client.records(str(domain), params=dnsimple_filter)] - - def delete_record(self, domain, rid): - """delete a single dns ressource record""" - self.client.delete_record(str(domain), rid) - - def update_record(self, domain, rid, ttl=None, priority=None): - """update a single dns ressource record""" - data = {} - if ttl: - data['ttl'] = ttl - if priority: - data['priority'] = priority - return self.client.update_record(str(domain), str(rid), data)['record'] - - def create_record(self, domain, name, record_type, content, ttl=None, priority=None): - """create a single dns ressource record""" - data = { - 'name': name, - 'type': record_type, - 'content': content, - } - if ttl: - 
data['ttl'] = ttl - if priority: - data['priority'] = priority - return self.client.add_record(str(domain), data)['record'] - - -class DNSimpleV2(): - """class which uses dnsimple-python >= 2""" - - def __init__(self, account_email, account_api_token, sandbox, module): - """init""" - self.module = module - self.account_email = account_email - self.account_api_token = account_api_token - self.sandbox = sandbox - self.pagination_per_page = 30 - self.dnsimple_client() - self.dnsimple_account() - - def dnsimple_client(self): - """creates a dnsimple client object""" - if self.account_email and self.account_api_token: - client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token) - else: - msg = "Option account_email or account_api_token not provided. " \ "DNSimple authentication with a .dnsimple config file is not " \ "supported with dnsimple-python>=2.0.0" - raise DNSimpleException(msg) - client.identity.whoami() - self.client = client - - def dnsimple_account(self): - """select a dnsimple account. If a user token is used for authentication, - this user must only have access to a single account""" - account = self.client.identity.whoami().data.account - # user supplied a user token instead of account api token - if not account: - accounts = Accounts(self.client).list_accounts().data - if len(accounts) != 1: - msg = "The provided dnsimple token is a user token with multiple accounts. " \ "Use an account token or a user token with access to a single account. " \ "See https://support.dnsimple.com/articles/api-access-token/" - raise DNSimpleException(msg) - account = accounts[0] - self.account = account - - def get_all_domains(self): - """returns a list of all domains""" - domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id) - return [d.__dict__ for d in domain_list] - - def get_domain(self, domain): - """returns a single domain by name or id""" - try: - dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__ - except DNSimpleException as e: - exception_string = str(e.message) - if re.match(r"^Domain .+ not found$", exception_string): - dr = None - else: - raise - return dr - - def create_domain(self, domain): - """create a single domain""" - return self.client.domains.create_domain(self.account.id, domain).data.__dict__ - - def delete_domain(self, domain): - """delete a single domain""" - self.client.domains.delete_domain(self.account.id, domain) - - def get_records(self, zone, dnsimple_filter=None): - """return dns resource records which match a specified filter""" - records_list = self._get_paginated_result(self.client.zones.list_records, - account_id=self.account.id, - zone=zone, filter=dnsimple_filter) - return [d.__dict__ for d in records_list] - - def delete_record(self, domain, rid): - """delete a single dns resource record""" - self.client.zones.delete_record(self.account.id, domain, rid) - - def update_record(self, domain, rid, ttl=None, priority=None): - """update a single dns resource record""" - zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority) - result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__ - return result - - def create_record(self, domain, name, record_type, content, ttl=None, priority=None): - """create a single dns resource record""" - zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority) - return self.client.zones.create_record(self.account.id, str(domain), 
zr).data.__dict__ - - def _get_paginated_result(self, operation, **options): - """return all results of a paginated api response""" - records_pagination = operation(per_page=self.pagination_per_page, **options).pagination - result_list = [] - for page in range(1, records_pagination.total_pages + 1): - page_data = operation(per_page=self.pagination_per_page, page=page, **options).data - result_list.extend(page_data) - return result_list - - -DNSIMPLE_IMP_ERR = [] -HAS_DNSIMPLE = False -try: - # try to import dnsimple >= 2.0.0 - from dnsimple import Client, DNSimpleException - from dnsimple.service import Accounts - from dnsimple.version import version as dnsimple_version - from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput - HAS_DNSIMPLE = True -except ImportError: - DNSIMPLE_IMP_ERR.append(traceback.format_exc()) - -if not HAS_DNSIMPLE: - # try to import dnsimple < 2.0.0 - try: - from dnsimple.dnsimple import __version__ as dnsimple_version - from dnsimple import DNSimple - from dnsimple.dnsimple import DNSimpleException - HAS_DNSIMPLE = True - except ImportError: - DNSIMPLE_IMP_ERR.append(traceback.format_exc()) - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback - - -def main(): - module = AnsibleModule( - argument_spec=dict( - account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])), - account_api_token=dict(type='str', - no_log=True, - fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])), - domain=dict(type='str'), - record=dict(type='str'), - record_ids=dict(type='list', elements='str'), - type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', - 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', - 'PTR', 'AAAA', 'SSHFP', 'HINFO', - 'POOL', 'CAA']), - ttl=dict(type='int', default=3600), - value=dict(type='str'), - priority=dict(type='int'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - solo=dict(type='bool', default=False), - sandbox=dict(type='bool', default=False), - ), - required_together=[ - ['record', 'value'] - ], - supports_check_mode=True, - ) - - if not HAS_DNSIMPLE: - module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0]) - - account_email = module.params.get('account_email') - account_api_token = module.params.get('account_api_token') - domain = module.params.get('domain') - record = module.params.get('record') - record_ids = module.params.get('record_ids') - record_type = module.params.get('type') - ttl = module.params.get('ttl') - value = module.params.get('value') - priority = module.params.get('priority') - state = module.params.get('state') - is_solo = module.params.get('solo') - sandbox = module.params.get('sandbox') - - DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0] - - try: - if DNSIMPLE_MAJOR_VERSION > 1: - ds = DNSimpleV2(account_email, account_api_token, sandbox, module) - else: - module.deprecate( - 'Support for python-dnsimple < 2 is deprecated. 
' - 'Update python-dnsimple to version >= 2.0.0', - version='5.0.0', collection_name='community.general' - ) - ds = DNSimpleV1(account_email, account_api_token, sandbox, module) - # Let's figure out what operation we want to do - # No domain, return a list - if not domain: - all_domains = ds.get_all_domains() - module.exit_json(changed=False, result=all_domains) - - # Domain & No record - if record is None and not record_ids: - if domain.isdigit(): - typed_domain = int(domain) - else: - typed_domain = str(domain) - dr = ds.get_domain(typed_domain) - # domain does not exist - if state == 'present': - if dr: - module.exit_json(changed=False, result=dr) - else: - if module.check_mode: - module.exit_json(changed=True) - else: - response = ds.create_domain(domain) - module.exit_json(changed=True, result=response) - # state is absent - else: - if dr: - if not module.check_mode: - ds.delete_domain(domain) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - # need the not none check since record could be an empty string - if record is not None: - if not record_type: - module.fail_json(msg="Missing the record type") - if not value: - module.fail_json(msg="Missing the record value") - - records_list = ds.get_records(domain, dnsimple_filter={'name': record}) - rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None) - if state == 'present': - changed = False - if is_solo: - # delete any records that have the same name and record type - same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type] - if rr: - same_type = [rid for rid in same_type if rid != rr['id']] - if same_type: - if not module.check_mode: - for rid in same_type: - ds.delete_record(domain, rid) - changed = True - if rr: - # check if we need to update - if rr['ttl'] != ttl or rr['priority'] != priority: - if module.check_mode: - module.exit_json(changed=True) - else: - response = ds.update_record(domain, rr['id'], ttl, priority) - module.exit_json(changed=True, result=response) - else: - module.exit_json(changed=changed, result=rr) - else: - # create it - if module.check_mode: - module.exit_json(changed=True) - else: - response = ds.create_record(domain, record, record_type, value, ttl, priority) - module.exit_json(changed=True, result=response) - # state is absent - else: - if rr: - if not module.check_mode: - ds.delete_record(domain, rr['id']) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - # Make sure these record_ids either all exist or none - if record_ids: - current_records = ds.get_records(domain, dnsimple_filter=None) - current_record_ids = [str(d['id']) for d in current_records] - wanted_record_ids = [str(r) for r in record_ids] - if state == 'present': - difference = list(set(wanted_record_ids) - set(current_record_ids)) - if difference: - module.fail_json(msg="Missing the following records: %s" % difference) - else: - module.exit_json(changed=False) - # state is absent - else: - difference = list(set(wanted_record_ids) & set(current_record_ids)) - if difference: - if not module.check_mode: - for rid in difference: - ds.delete_record(domain, rid) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - except DNSimpleException as e: - if DNSIMPLE_MAJOR_VERSION > 1: - module.fail_json(msg="DNSimple exception: %s" % e.message) - else: - module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message'])) - module.fail_json(msg="Unknown what you 
wanted me to do") - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/dnsimple_info.py b/ansible_collections/community/general/plugins/modules/net_tools/dnsimple_info.py deleted file mode 100644 index 4ac22be0..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/dnsimple_info.py +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Edward Hilgendorf, -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: dnsimple_info - -short_description: Pull basic info from DNSimple API - -version_added: "4.2.0" - -description: Retrieve existing records and domains from DNSimple API. - -options: - name: - description: - - The domain name to retrieve info from. - - Will return all associated records for this domain if specified. - - If not specified, will return all domains associated with the account ID. - type: str - - account_id: - description: The account ID to query. - required: true - type: str - - api_key: - description: The API key to use. - required: true - type: str - - record: - description: - - The record to find. - - If specified, only this record will be returned instead of all records. - required: false - type: str - - sandbox: - description: Whether or not to use sandbox environment. - required: false - default: false - type: bool - -author: - - Edward Hilgendorf (@edhilgendorf) -''' - -EXAMPLES = r''' -- name: Get all domains from an account - community.general.dnsimple_info: - account_id: "1234" - api_key: "1234" - -- name: Get all records from a domain - community.general.dnsimple_info: - name: "example.com" - account_id: "1234" - api_key: "1234" - -- name: Get all info from a matching record - community.general.dnsimple_info: - name: "example.com" - record: "subdomain" - account_id: "1234" - api_key: "1234" -''' - -RETURN = r''' -dnsimple_domain_info: - description: Returns a list of dictionaries of all domains associated with the supplied account ID. - type: list - elements: dict - returned: success when I(name) is not specified - sample: - - account_id: 1234 - created_at: '2021-10-16T21:25:42Z' - id: 123456 - last_transferred_at: - name: example.com - reverse: false - secondary: false - updated_at: '2021-11-10T20:22:50Z' - contains: - account_id: - description: The account ID. - type: int - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. - type: int - last_transferred_at: - description: Date the domain was transferred, or empty if not. - type: str - name: - description: Name of the record. - type: str - reverse: - description: Whether or not it is a reverse zone record. - type: bool - updated_at: - description: When the domain entry was updated. - type: str - -dnsimple_records_info: - description: Returns a list of dictionaries with all records for the domain supplied. - type: list - elements: dict - returned: success when I(name) is specified, but I(record) is not - sample: - - content: ns1.dnsimple.com admin.dnsimple.com - created_at: '2021-10-16T19:07:34Z' - id: 12345 - name: 'catheadbiscuit' - parent_id: null - priority: null - regions: - - global - system_record: true - ttl: 3600 - type: SOA - updated_at: '2021-11-15T23:55:51Z' - zone_id: example.com - contains: - content: - description: Content of the returned record. 
- type: str - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. - type: int - name: - description: Name of the record. - type: str - parent_id: - description: Parent record or null. - type: int - priority: - description: Priority setting of the record. - type: str - regions: - description: List of regions where the record is available. - type: list - system_record: - description: Whether or not it is a system record. - type: bool - ttl: - description: Record TTL. - type: int - type: - description: Record type. - type: str - updated_at: - description: When the domain entry was updated. - type: str - zone_id: - description: ID of the zone that the record is associated with. - type: str -dnsimple_record_info: - description: Returns a list of dictionaries that match the record supplied. - returned: success when I(name) and I(record) are specified - type: list - elements: dict - sample: - - content: 1.2.3.4 - created_at: '2021-11-15T23:55:51Z' - id: 123456 - name: catheadbiscuit - parent_id: null - priority: null - regions: - - global - system_record: false - ttl: 3600 - type: A - updated_at: '2021-11-15T23:55:51Z' - zone_id: example.com - contains: - content: - description: Content of the returned record. - type: str - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. - type: int - name: - description: Name of the record. - type: str - parent_id: - description: Parent record or null. - type: int - priority: - description: Priority setting of the record. - type: str - regions: - description: List of regions where the record is available. - type: list - system_record: - description: Whether or not it is a system record. - type: bool - ttl: - description: Record TTL. - type: int - type: - description: Record type. - type: str - updated_at: - description: When the domain entry was updated. - type: str - zone_id: - description: ID of the zone that the record is associated with. 
- type: str -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -try: - from requests import Request, Session -except ImportError: - HAS_REQUESTS = False - REQUESTS_IMPORT_ERROR = traceback.format_exc() -else: - HAS_REQUESTS = True - - -def build_url(account, key, is_sandbox): - headers = {'Accept': 'application/json', - 'Authorization': 'Bearer ' + key} - url = 'https://api{sandbox}.dnsimple.com/'.format( - sandbox=".sandbox" if is_sandbox else "") + 'v2/' + account - req = Request(url=url, headers=headers) - prepped_request = req.prepare() - return prepped_request - - -def iterate_data(module, request_object): - base_url = request_object.url - response = Session().send(request_object) - if 'pagination' in response.json(): - data = response.json()["data"] - pages = response.json()["pagination"]["total_pages"] - if int(pages) > 1: - for page in range(2, pages + 1): - request_object.url = base_url + '&page=' + str(page) - new_results = Session().send(request_object) - data = data + new_results.json()["data"] - return data - else: - module.fail_json(msg='API Call failed, check ID, key and sandbox values') - - -def record_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET' - return iterate_data(dnsimple_mod, req_obj) - - -def domain_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?per_page=100', 'GET' - return iterate_data(dnsimple_mod, req_obj) - - -def account_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = req_obj.url + '/zones/?per_page=100', 'GET' - return iterate_data(dnsimple_mod, req_obj) - - -def main(): - # define available arguments/parameters a user can pass to the module - fields = { - "account_id": {"required": True, "type": "str"}, - "api_key": {"required": True, "type": "str", "no_log": True}, - "name": {"required": False, "type": "str"}, - "record": {"required": False, "type": "str"}, - "sandbox": {"required": False, "type": "bool", "default": False} - } - - result = { - 'changed': False - } - - module = AnsibleModule( - argument_spec=fields, - supports_check_mode=True - ) - - if not HAS_REQUESTS: - module.fail_json( - msg=missing_required_lib('requests'), - exception=REQUESTS_IMPORT_ERROR) - - params = module.params - req = build_url(params['account_id'], - params['api_key'], - params['sandbox']) - - # At minimum we need account and key - if params['account_id'] and params['api_key']: - # If we have a record return info on that record - if params['name'] and params['record']: - result['dnsimple_record_info'] = record_info(module, req) - module.exit_json(**result) - - # If we have the account only and domain, return records for the domain - elif params['name']: - result['dnsimple_records_info'] = domain_info(module, req) - module.exit_json(**result) - - # If we have the account only, return domains - else: - result['dnsimple_domain_info'] = account_info(module, req) - module.exit_json(**result) - else: - module.fail_json(msg="Need at least account_id and api_key") - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py 
b/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py deleted file mode 100644 index 75135c82..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py +++ /dev/null @@ -1,717 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: dnsmadeeasy -short_description: Interface with dnsmadeeasy.com (a DNS hosting service). -description: - - > - Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or - monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/) -options: - account_key: - description: - - Account API Key. - required: true - type: str - - account_secret: - description: - - Account Secret Key. - required: true - type: str - - domain: - description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster - resolution. - required: true - type: str - - sandbox: - description: - - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used. - type: bool - default: 'no' - - record_name: - description: - - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless - of the state argument. - type: str - - record_type: - description: - - Record type. - choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] - type: str - - record_value: - description: - - > - Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, - SRV: <priority> <weight> <port> <target name>, TXT: <text value> - - > - If record_value is not specified, no changes will be made and the record will be returned in 'result' - (in other words, this module can be used to fetch a record's current id, type, and ttl) - type: str - - record_ttl: - description: - - record's "Time to live". Number of seconds the record remains cached in DNS servers. - default: 1800 - type: int - - state: - description: - - whether the record should exist or not - required: true - choices: [ 'present', 'absent' ] - type: str - - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - - monitor: - description: - - If C(yes), add or change the monitor. This is applicable only for A records. - type: bool - default: 'no' - - systemDescription: - description: - - Description used by the monitor. - default: '' - type: str - - maxEmails: - description: - - Number of emails sent to the contact list by the monitor. - default: 1 - type: int - - protocol: - description: - - Protocol used by the monitor. - default: 'HTTP' - choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS'] - type: str - - port: - description: - - Port used by the monitor. - default: 80 - type: int - - sensitivity: - description: - - Number of checks the monitor performs before a failover occurs where Low = 8, Medium = 5, and High = 3. - default: 'Medium' - choices: ['Low', 'Medium', 'High'] - type: str - - contactList: - description: - - Name or id of the contact list that the monitor will notify. - - The default C('') means the Account Owner. 
- default: '' - type: str - - httpFqdn: - description: - - The fully qualified domain name used by the monitor. - type: str - - httpFile: - description: - - The file at the FQDN that the monitor queries for HTTP or HTTPS. - type: str - - httpQueryString: - description: - - The string in the httpFile that the monitor queries for HTTP or HTTPS. - type: str - - failover: - description: - - If C(yes), add or change the failover. This is applicable only for A records. - type: bool - default: 'no' - - autoFailover: - description: - - If true, fallback to the primary IP address is manual after a failover. - - If false, fallback to the primary IP address is automatic after a failover. - type: bool - default: 'no' - - ip1: - description: - - Primary IP address for the failover. - - Required if adding or changing the monitor or failover. - type: str - - ip2: - description: - - Secondary IP address for the failover. - - Required if adding or changing the failover. - type: str - - ip3: - description: - - Tertiary IP address for the failover. - type: str - - ip4: - description: - - Quaternary IP address for the failover. - type: str - - ip5: - description: - - Quinary IP address for the failover. - type: str - -notes: - - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few - seconds of actual time by using NTP. - - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'. - These values can be registered and used in your playbooks. - - Only A records can have a monitor or failover. - - To add a failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required. - - To add a monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required. - - The monitor and the failover will share 'port', 'protocol', and 'ip1' options. 
- -requirements: [ hashlib, hmac ] -author: "Brice Burgess (@briceburg)" -''' - -EXAMPLES = ''' -- name: Fetch my.com domain records - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - register: response - -- name: Create a record - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - -- name: Update the previously created record - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_value: 192.0.2.23 - -- name: Fetch a specific record - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - register: response - -- name: Delete a record - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - record_type: A - state: absent - record_name: test - -- name: Add a failover - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - -- name: Add a failover - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - ip3: 127.0.0.4 - ip4: 127.0.0.5 - ip5: 127.0.0.6 - -- name: Add a monitor - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: yes - ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default - maxEmails: 1 - systemDescription: Monitor Test A record - contactList: my contact list - -- name: Add a monitor with http options - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: yes - ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default - maxEmails: 1 - systemDescription: Monitor Test A record - contactList: 1174 # contact list id - httpFqdn: http://my.com - httpFile: example - httpQueryString: some string - -- name: Add a monitor and a failover - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - monitor: yes - protocol: HTTPS - port: 443 - maxEmails: 1 - systemDescription: monitoring my.com status - contactList: emergencycontacts - -- name: Remove a failover - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: no - -- name: Remove a monitor - community.general.dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: no -''' - -# ============================================ -# DNSMadeEasy module specific support methods. 
-# - -import json -import hashlib -import hmac -import locale -from time import strftime, gmtime - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six import string_types - - -class DME2(object): - - def __init__(self, apikey, secret, domain, sandbox, module): - self.module = module - - self.api = apikey - self.secret = secret - - if sandbox: - self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/' - self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl) - else: - self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' - - self.domain = str(domain) - self.domain_map = None # ["domain_name"] => ID - self.record_map = None # ["record_name"] => ID - self.records = None # ["record_ID"] => - self.all_records = None - self.contactList_map = None # ["contactList_name"] => ID - - # Lookup the domain ID if passed as a domain name vs. ID - if not self.domain.isdigit(): - self.domain = self.getDomainByName(self.domain)['id'] - - self.record_url = 'dns/managed/' + str(self.domain) + '/records' - self.monitor_url = 'monitor' - self.contactList_url = 'contactList' - - def _headers(self): - currTime = self._get_date() - hashstring = self._create_hash(currTime) - headers = {'x-dnsme-apiKey': self.api, - 'x-dnsme-hmac': hashstring, - 'x-dnsme-requestDate': currTime, - 'content-type': 'application/json'} - return headers - - def _get_date(self): - locale.setlocale(locale.LC_TIME, 'C') - return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) - - def _create_hash(self, rightnow): - return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() - - def query(self, resource, method, data=None): - url = self.baseurl + resource - if data and not isinstance(data, string_types): - data = urlencode(data) - - response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) - if info['status'] not in (200, 201, 204): - self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) - - try: - return json.load(response) - except Exception: - return {} - - def getDomain(self, domain_id): - if not self.domain_map: - self._instMap('domain') - - return self.domains.get(domain_id, False) - - def getDomainByName(self, domain_name): - if not self.domain_map: - self._instMap('domain') - - return self.getDomain(self.domain_map.get(domain_name, 0)) - - def getDomains(self): - return self.query('dns/managed', 'GET')['data'] - - def getRecord(self, record_id): - if not self.record_map: - self._instMap('record') - - return self.records.get(record_id, False) - - # Try to find a single record matching this one. - # How we do this depends on the type of record. For instance, there - # can be several MX records for a single record_name while there can - # only be a single CNAME for a particular record_name. Note also that - # there can be several records with different types for a single name. 
- def getMatchingRecord(self, record_name, record_type, record_value): - # Get all the records if not already cached - if not self.all_records: - self.all_records = self.getRecords() - - if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]: - for result in self.all_records: - if result['name'] == record_name and result['type'] == record_type: - return result - return False - elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]: - for result in self.all_records: - if record_type == "MX": - value = record_value.split(" ")[1] - # Note that TXT records are surrounded by quotes in the API response. - elif record_type == "TXT": - value = '"{0}"'.format(record_value) - elif record_type == "SRV": - value = record_value.split(" ")[3] - else: - value = record_value - if result['name'] == record_name and result['type'] == record_type and result['value'] == value: - return result - return False - else: - raise Exception('record_type not yet supported') - - def getRecords(self): - return self.query(self.record_url, 'GET')['data'] - - def _instMap(self, type): - # @TODO cache this call so it's executed only once per ansible execution - map = {} - results = {} - - # iterate over e.g. self.getDomains() || self.getRecords() - for result in getattr(self, 'get' + type.title() + 's')(): - - map[result['name']] = result['id'] - results[result['id']] = result - - # e.g. self.domain_map || self.record_map - setattr(self, type + '_map', map) - setattr(self, type + 's', results) # e.g. self.domains || self.records - - def prepareRecord(self, data): - return json.dumps(data, separators=(',', ':')) - - def createRecord(self, data): - # @TODO update the cache w/ resultant record + id when implemented - return self.query(self.record_url, 'POST', data) - - def updateRecord(self, record_id, data): - # @TODO update the cache w/ resultant record + id when implemented - return self.query(self.record_url + '/' + str(record_id), 'PUT', data) - - def deleteRecord(self, record_id): - # @TODO remove record from the cache when implemented - return self.query(self.record_url + '/' + str(record_id), 'DELETE') - - def getMonitor(self, record_id): - return self.query(self.monitor_url + '/' + str(record_id), 'GET') - - def updateMonitor(self, record_id, data): - return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data) - - def prepareMonitor(self, data): - return json.dumps(data, separators=(',', ':')) - - def getContactList(self, contact_list_id): - if not self.contactList_map: - self._instMap('contactList') - - return self.contactLists.get(contact_list_id, False) - - def getContactlists(self): - return self.query(self.contactList_url, 'GET')['data'] - - def getContactListByName(self, name): - if not self.contactList_map: - self._instMap('contactList') - - return self.getContactList(self.contactList_map.get(name, 0)) - -# =========================================== -# Module execution. 
-# - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_key=dict(required=True, no_log=True), - account_secret=dict(required=True, no_log=True), - domain=dict(required=True), - sandbox=dict(default=False, type='bool'), - state=dict(required=True, choices=['present', 'absent']), - record_name=dict(required=False), - record_type=dict(required=False, choices=[ - 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), - record_value=dict(required=False), - record_ttl=dict(required=False, default=1800, type='int'), - monitor=dict(default=False, type='bool'), - systemDescription=dict(default=''), - maxEmails=dict(default=1, type='int'), - protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), - port=dict(default=80, type='int'), - sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), - contactList=dict(default=None), - httpFqdn=dict(required=False), - httpFile=dict(required=False), - httpQueryString=dict(required=False), - failover=dict(default=False, type='bool'), - autoFailover=dict(default=False, type='bool'), - ip1=dict(required=False), - ip2=dict(required=False), - ip3=dict(required=False), - ip4=dict(required=False), - ip5=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - required_together=[ - ['record_value', 'record_ttl', 'record_type'] - ], - required_if=[ - ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']], - ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']] - ] - ) - - protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6) - sensitivities = dict(Low=8, Medium=5, High=3) - - DME = DME2(module.params["account_key"], module.params[ - "account_secret"], module.params["domain"], module.params["sandbox"], module) - state = module.params["state"] - record_name = module.params["record_name"] - record_type = module.params["record_type"] - record_value = module.params["record_value"] - - # Follow Keyword Controlled Behavior - if record_name is None: - domain_records = DME.getRecords() - if not domain_records: - module.fail_json( - msg="The requested domain name is not accessible with this api_key; try using its ID if known.") - module.exit_json(changed=False, result=domain_records) - - # Fetch existing record + Build new one - current_record = DME.getMatchingRecord(record_name, record_type, record_value) - new_record = {'name': record_name} - for i in ["record_value", "record_type", "record_ttl"]: - if not module.params[i] is None: - new_record[i[len("record_"):]] = module.params[i] - # Special handling for mx record - if new_record["type"] == "MX": - new_record["mxLevel"] = new_record["value"].split(" ")[0] - new_record["value"] = new_record["value"].split(" ")[1] - - # Special handling for SRV records - if new_record["type"] == "SRV": - new_record["priority"] = new_record["value"].split(" ")[0] - new_record["weight"] = new_record["value"].split(" ")[1] - new_record["port"] = new_record["value"].split(" ")[2] - new_record["value"] = new_record["value"].split(" ")[3] - - # Fetch existing monitor if the A record indicates it should exist and build the new monitor - current_monitor = dict() - new_monitor = dict() - if current_record and current_record['type'] == 'A': - current_monitor = DME.getMonitor(current_record['id']) - - # Build the new monitor - for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails', - 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString', - 'failover', 
'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']: - if module.params[i] is not None: - if i == 'protocol': - # The API requires protocol to be a numeric in the range 1-6 - new_monitor['protocolId'] = protocols[module.params[i]] - elif i == 'sensitivity': - # The API requires sensitivity to be a numeric of 8, 5, or 3 - new_monitor[i] = sensitivities[module.params[i]] - elif i == 'contactList': - # The module accepts either the name or the id of the contact list - contact_list_id = module.params[i] - if not contact_list_id.isdigit() and contact_list_id != '': - contact_list = DME.getContactListByName(contact_list_id) - if not contact_list: - module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id)) - contact_list_id = contact_list.get('id', '') - new_monitor['contactListId'] = contact_list_id - else: - # The module option names match the API field names - new_monitor[i] = module.params[i] - - # Compare new record against existing one - record_changed = False - if current_record: - for i in new_record: - # Remove leading and trailing quote character from values because TXT records - # are surrounded by quotes. - if str(current_record[i]).strip('"') != str(new_record[i]): - record_changed = True - new_record['id'] = str(current_record['id']) - - monitor_changed = False - if current_monitor: - for i in new_monitor: - if str(current_monitor.get(i)) != str(new_monitor[i]): - monitor_changed = True - - # Follow Keyword Controlled Behavior - if state == 'present': - # return the record if no value is specified - if "value" not in new_record: - if not current_record: - module.fail_json( - msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) - module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) - - # create record and monitor as the record does not exist - if not current_record: - record = DME.createRecord(DME.prepareRecord(new_record)) - if new_monitor.get('monitor') and record_type == "A": - monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor)) - module.exit_json(changed=True, result=dict(record=record, monitor=monitor)) - else: - module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor)) - - # update the record - updated = False - if record_changed: - DME.updateRecord(current_record['id'], DME.prepareRecord(new_record)) - updated = True - if monitor_changed: - DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor)) - updated = True - if updated: - module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor)) - - # return the record (no changes) - module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) - - elif state == 'absent': - changed = False - # delete the record (and the monitor/failover) if it exists - if current_record: - DME.deleteRecord(current_record['id']) - module.exit_json(changed=True) - - # record does not exist, return w/o change. 
- module.exit_json(changed=changed) - - else: - module.fail_json( - msg="'%s' is an unknown value for the state argument" % state) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/gandi_livedns.py b/ansible_collections/community/general/plugins/modules/net_tools/gandi_livedns.py deleted file mode 100644 index 61242885..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/gandi_livedns.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019 Gregory Thiemonge -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: gandi_livedns -author: -- Gregory Thiemonge (@gthiemonge) -version_added: "2.3.0" -short_description: Manage Gandi LiveDNS records -description: -- "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)." -options: - api_key: - description: - - Account API token. - type: str - required: true - record: - description: - - Record to add. - type: str - required: true - state: - description: - - Whether the record(s) should exist or not. - type: str - choices: [ absent, present ] - default: present - ttl: - description: - - The TTL to give the new record. - - Required when I(state=present). - type: int - type: - description: - - The type of DNS record to create. - type: str - required: true - values: - description: - - The record values. - - Required when I(state=present). - type: list - elements: str - domain: - description: - - The name of the Domain to work with (for example, "example.com"). - required: true - type: str -notes: -- Supports C(check_mode). -''' - -EXAMPLES = r''' -- name: Create a test A record to point to 127.0.0.1 in the my.com domain - community.general.gandi_livedns: - domain: my.com - record: test - type: A - values: - - 127.0.0.1 - ttl: 7200 - api_key: dummyapitoken - register: record - -- name: Create a mail CNAME record to www.my.com domain - community.general.gandi_livedns: - domain: my.com - type: CNAME - record: mail - values: - - www - ttl: 7200 - api_key: dummyapitoken - state: present - -- name: Change its TTL - community.general.gandi_livedns: - domain: my.com - type: CNAME - record: mail - values: - - www - ttl: 10800 - api_key: dummyapitoken - state: present - -- name: Delete the record - community.general.gandi_livedns: - domain: my.com - type: CNAME - record: mail - api_key: dummyapitoken - state: absent -''' - -RETURN = r''' -record: - description: A dictionary containing the record data. - returned: success, except on record deletion - type: dict - contains: - values: - description: The record content (details depend on record type). - returned: success - type: list - elements: str - sample: - - 192.0.2.91 - - 192.0.2.92 - record: - description: The record name. - returned: success - type: str - sample: www - ttl: - description: The time-to-live for the record. - returned: success - type: int - sample: 300 - type: - description: The record type. - returned: success - type: str - sample: A - domain: - description: The domain associated with the record. 
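The dnsmadeeasy module above folds MX and SRV value parsing into main() by splitting the space-separated record value. Pulled out on its own (the helper name is hypothetical, not part of the module), the logic is:

# Standalone sketch of the MX/SRV value handling in the dnsmadeeasy main() above.
# split_record_value() is a hypothetical helper, not part of community.general.
def split_record_value(record_type, value):
    """Split a space-separated record value into its API fields."""
    fields = {'value': value}
    if record_type == 'MX':
        # "10 mail.example.com" -> mxLevel=10, value=mail.example.com
        level, target = value.split(' ', 1)
        fields.update(mxLevel=level, value=target)
    elif record_type == 'SRV':
        # "10 20 5060 sip.example.com" -> priority, weight, port, target
        priority, weight, port, target = value.split(' ', 3)
        fields.update(priority=priority, weight=weight, port=port, value=target)
    return fields

print(split_record_value('MX', '10 mail.example.com'))
# {'value': 'mail.example.com', 'mxLevel': '10'}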
- returned: success - type: str - sample: my.com -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(type='str', required=True, no_log=True), - record=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - ttl=dict(type='int'), - type=dict(type='str', required=True), - values=dict(type='list', elements='str'), - domain=dict(type='str', required=True), - ), - supports_check_mode=True, - required_if=[ - ('state', 'present', ['values', 'ttl']), - ], - ) - - gandi_api = GandiLiveDNSAPI(module) - - if module.params['state'] == 'present': - ret, changed = gandi_api.ensure_dns_record(module.params['record'], - module.params['type'], - module.params['ttl'], - module.params['values'], - module.params['domain']) - else: - ret, changed = gandi_api.delete_dns_record(module.params['record'], - module.params['type'], - module.params['values'], - module.params['domain']) - - result = dict( - changed=changed, - ) - if ret: - result['record'] = gandi_api.build_result(ret, - module.params['domain']) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py b/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py deleted file mode 100644 index f7360366..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py +++ /dev/null @@ -1,480 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Ravi Bhure -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: haproxy -short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands -author: -- Ravi Bhure (@ravibhure) -description: - - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. -notes: - - Enable, disable and drain commands are restricted and can only be issued on - sockets configured for level 'admin'. For example, you can add the line - 'stats socket /var/run/haproxy.sock level admin' to the general section of - haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). - - Depends on netcat (nc) being available; you need to install the appropriate - package for your operating system before this module can be used. -options: - backend: - description: - - Name of the HAProxy backend pool. - - If this parameter is unset, it will be auto-detected. - type: str - drain: - description: - - Wait until the server has no active connections or until the timeout - determined by wait_interval and wait_retries is reached. - - Continue only after the status changes to 'MAINT'. - - This overrides the shutdown_sessions option. - type: bool - default: false - host: - description: - - Name of the backend host to change. - type: str - required: true - shutdown_sessions: - description: - - When disabling a server, immediately terminate all the sessions attached - to the specified server. - - This can be used to terminate long-running sessions after a server is put - into maintenance mode. Overridden by the drain option. 
- type: bool - default: no - socket: - description: - - Path to the HAProxy socket file. - type: path - default: /var/run/haproxy.sock - state: - description: - - Desired state of the provided backend host. - - Note that C(drain) state was added in version 2.4. - - It is supported only by HAProxy version 1.5 or later, - - When used on versions < 1.5, it will be ignored. - type: str - required: true - choices: [ disabled, drain, enabled ] - agent: - description: - - Disable/enable agent checks (depending on I(state) value). - type: bool - default: no - version_added: 1.0.0 - health: - description: - - Disable/enable health checks (depending on I(state) value). - type: bool - default: no - version_added: "1.0.0" - fail_on_not_found: - description: - - Fail whenever trying to enable/disable a backend host that does not exist - type: bool - default: no - wait: - description: - - Wait until the server reports a status of 'UP' when C(state=enabled), - status of 'MAINT' when C(state=disabled) or status of 'DRAIN' when C(state=drain) - type: bool - default: no - wait_interval: - description: - - Number of seconds to wait between retries. - type: int - default: 5 - wait_retries: - description: - - Number of times to check for status after changing the state. - type: int - default: 25 - weight: - description: - - The value passed in argument. - - If the value ends with the `%` sign, then the new weight will be - relative to the initially configured weight. - - Relative weights are only permitted between 0 and 100% and absolute - weights are permitted between 0 and 256. - type: str -''' - -EXAMPLES = r''' -- name: Disable server in 'www' backend pool - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - backend: www - -- name: Disable server in 'www' backend pool, also stop health/agent checks - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - health: yes - agent: yes - -- name: Disable server without backend pool name (apply to all available backend pool) - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - -- name: Disable server, provide socket file - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - backend: www - -- name: Disable server, provide socket file, wait until status reports in maintenance - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - backend: www - wait: yes - -# Place server in drain mode, providing a socket file. Then check the server's -# status every minute to see if it changes to maintenance mode, continuing if it -# does in an hour and failing otherwise. 
-- community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - backend: www - wait: yes - drain: yes - wait_interval: 60 - wait_retries: 60 - -- name: Disable backend server in 'www' backend pool and drop open sessions to it - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - backend: www - socket: /var/run/haproxy.sock - shutdown_sessions: yes - -- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found - community.general.haproxy: - state: disabled - host: '{{ inventory_hostname }}' - fail_on_not_found: yes - -- name: Enable server in 'www' backend pool - community.general.haproxy: - state: enabled - host: '{{ inventory_hostname }}' - backend: www - -- name: Enable server in 'www' backend pool wait until healthy - community.general.haproxy: - state: enabled - host: '{{ inventory_hostname }}' - backend: www - wait: yes - -- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health - community.general.haproxy: - state: enabled - host: '{{ inventory_hostname }}' - backend: www - wait: yes - wait_retries: 10 - wait_interval: 5 - -- name: Enable server in 'www' backend pool with change server(s) weight - community.general.haproxy: - state: enabled - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - weight: 10 - backend: www - -- name: Set the server in 'www' backend pool to drain mode - community.general.haproxy: - state: drain - host: '{{ inventory_hostname }}' - socket: /var/run/haproxy.sock - backend: www -''' - -import csv -import socket -import time -from string import Template - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_text - - -DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock" -RECV_SIZE = 1024 -ACTION_CHOICES = ['enabled', 'disabled', 'drain'] -WAIT_RETRIES = 25 -WAIT_INTERVAL = 5 - - -###################################################################### -class TimeoutException(Exception): - pass - - -class HAProxy(object): - """ - Used for communicating with HAProxy through its local UNIX socket interface. - Perform common tasks in Haproxy related to enable server and - disable server. - - The complete set of external commands Haproxy handles is documented - on their website: - - http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands - """ - - def __init__(self, module): - self.module = module - - self.state = self.module.params['state'] - self.host = self.module.params['host'] - self.backend = self.module.params['backend'] - self.weight = self.module.params['weight'] - self.socket = self.module.params['socket'] - self.shutdown_sessions = self.module.params['shutdown_sessions'] - self.fail_on_not_found = self.module.params['fail_on_not_found'] - self.agent = self.module.params['agent'] - self.health = self.module.params['health'] - self.wait = self.module.params['wait'] - self.wait_retries = self.module.params['wait_retries'] - self.wait_interval = self.module.params['wait_interval'] - self._drain = self.module.params['drain'] - self.command_results = {} - - def execute(self, cmd, timeout=200, capture_output=True): - """ - Executes a HAProxy command by sending a message to a HAProxy's local - UNIX socket and waiting up to 'timeout' milliseconds for the response. 
- """ - self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.client.connect(self.socket) - self.client.sendall(to_bytes('%s\n' % cmd)) - - result = b'' - buf = b'' - buf = self.client.recv(RECV_SIZE) - while buf: - result += buf - buf = self.client.recv(RECV_SIZE) - result = to_text(result, errors='surrogate_or_strict') - - if capture_output: - self.capture_command_output(cmd, result.strip()) - self.client.close() - return result - - def capture_command_output(self, cmd, output): - """ - Capture the output for a command - """ - if 'command' not in self.command_results: - self.command_results['command'] = [] - self.command_results['command'].append(cmd) - if 'output' not in self.command_results: - self.command_results['output'] = [] - self.command_results['output'].append(output) - - def discover_all_backends(self): - """ - Discover all entries with svname = 'BACKEND' and return a list of their corresponding - pxnames - """ - data = self.execute('show stat', 200, False).lstrip('# ') - r = csv.DictReader(data.splitlines()) - return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r))) - - def discover_version(self): - """ - Attempt to extract the haproxy version. - Return a tuple containing major and minor version. - """ - data = self.execute('show info', 200, False) - lines = data.splitlines() - line = [x for x in lines if 'Version:' in x] - try: - version_values = line[0].partition(':')[2].strip().split('.', 3) - version = (int(version_values[0]), int(version_values[1])) - except (ValueError, TypeError, IndexError): - version = None - - return version - - def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None): - """ - Run some command on the specified backends. If no backends are provided they will - be discovered automatically (all backends) - """ - # Discover backends if none are given - if pxname is None: - backends = self.discover_all_backends() - else: - backends = [pxname] - - # Run the command for each requested backend - for backend in backends: - # Fail when backends were not found - state = self.get_state_for(backend, svname) - if (self.fail_on_not_found) and state is None: - self.module.fail_json( - msg="The specified backend '%s/%s' was not found!" % (backend, svname)) - - if state is not None: - self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) - if self.wait: - self.wait_until_status(backend, svname, wait_for_status) - - def get_state_for(self, pxname, svname): - """ - Find the state of specific services. When pxname is not set, get all backends for a specific host. - Returns a list of dictionaries containing the status and weight for those services. - """ - data = self.execute('show stat', 200, False).lstrip('# ') - r = csv.DictReader(data.splitlines()) - state = tuple( - map( - lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']}, - filter(lambda d: (pxname is None or d['pxname'] - == pxname) and d['svname'] == svname, r) - ) - ) - return state or None - - def wait_until_status(self, pxname, svname, status): - """ - Wait for a service to reach the specified status. Try RETRIES times - with INTERVAL seconds of sleep in between. If the service has not reached - the expected status in that time, the module will fail. If the service was - not found, the module will fail. 
- """ - for i in range(1, self.wait_retries): - state = self.get_state_for(pxname, svname) - - # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here - # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching - if status in state[0]['status']: - if not self._drain or state[0]['scur'] == '0': - return True - time.sleep(self.wait_interval) - - self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % - (pxname, svname, status, self.wait_retries)) - - def enabled(self, host, backend, weight): - """ - Enabled action, marks server to UP and checks are re-enabled, - also supports to get current weight for server (default) and - set the weight for haproxy backend server when provides. - """ - cmd = "get weight $pxname/$svname; enable server $pxname/$svname" - if self.agent: - cmd += "; enable agent $pxname/$svname" - if self.health: - cmd += "; enable health $pxname/$svname" - if weight: - cmd += "; set weight $pxname/$svname %s" % weight - self.execute_for_backends(cmd, backend, host, 'UP') - - def disabled(self, host, backend, shutdown_sessions): - """ - Disabled action, marks server to DOWN for maintenance. In this mode, no more checks will be - performed on the server until it leaves maintenance, - also it shutdown sessions while disabling backend host server. - """ - cmd = "get weight $pxname/$svname" - if self.agent: - cmd += "; disable agent $pxname/$svname" - if self.health: - cmd += "; disable health $pxname/$svname" - cmd += "; disable server $pxname/$svname" - if shutdown_sessions: - cmd += "; shutdown sessions server $pxname/$svname" - self.execute_for_backends(cmd, backend, host, 'MAINT') - - def drain(self, host, backend, status='DRAIN'): - """ - Drain action, sets the server to DRAIN mode. - In this mode, the server will not accept any new connections - other than those that are accepted via persistence. - """ - haproxy_version = self.discover_version() - - # check if haproxy version supports DRAIN state (starting with 1.5) - if haproxy_version and (1, 5) <= haproxy_version: - cmd = "set server $pxname/$svname state drain" - self.execute_for_backends(cmd, backend, host, "DRAIN") - if status == "MAINT": - self.disabled(host, backend, self.shutdown_sessions) - - def act(self): - """ - Figure out what you want to do from ansible, and then do it. 
-        """
-        # Get the state before the run
-        self.command_results['state_before'] = self.get_state_for(self.backend, self.host)
-
-        # toggle enable/disable server
-        if self.state == 'enabled':
-            self.enabled(self.host, self.backend, self.weight)
-        elif self.state == 'disabled' and self._drain:
-            self.drain(self.host, self.backend, status='MAINT')
-        elif self.state == 'disabled':
-            self.disabled(self.host, self.backend, self.shutdown_sessions)
-        elif self.state == 'drain':
-            self.drain(self.host, self.backend)
-        else:
-            self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
-
-        # Get the state after the run
-        self.command_results['state_after'] = self.get_state_for(self.backend, self.host)
-
-        # Report change status
-        self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after'])
-
-        self.module.exit_json(**self.command_results)
-
-
-def main():
-
-    # load ansible module object
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(type='str', required=True, choices=ACTION_CHOICES),
-            host=dict(type='str', required=True),
-            backend=dict(type='str'),
-            weight=dict(type='str'),
-            socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION),
-            shutdown_sessions=dict(type='bool', default=False),
-            fail_on_not_found=dict(type='bool', default=False),
-            health=dict(type='bool', default=False),
-            agent=dict(type='bool', default=False),
-            wait=dict(type='bool', default=False),
-            wait_retries=dict(type='int', default=WAIT_RETRIES),
-            wait_interval=dict(type='int', default=WAIT_INTERVAL),
-            drain=dict(type='bool', default=False),
-        ),
-    )
-
-    if not socket:
-        module.fail_json(msg="unable to locate haproxy socket")
-
-    ansible_haproxy = HAProxy(module)
-    ansible_haproxy.act()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py b/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py
deleted file mode 100644
index 00f1112b..00000000
--- a/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# (c) 2017, Arie Bregman
-#
-# This file is a module for Ansible that manages network namespaces
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: ip_netns
-author: "Arie Bregman (@bregman-arie)"
-short_description: Manage network namespaces
-requirements: [ ip ]
-description:
-    - Create or delete network namespaces using the ip command.
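The HAProxy class in the module deleted above drives the admin socket directly. A minimal standalone reader for show stat, mirroring execute() and discover_all_backends(), might look like the sketch below; the socket path is an assumption and must match the stats socket line in your haproxy.cfg.

# Sketch: read backend state from the HAProxy admin socket, mirroring
# HAProxy.execute()/discover_all_backends() above. Socket path is an assumption.
import csv
import socket

def show_stat(sock_path='/var/run/haproxy.sock'):
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect(sock_path)
    client.sendall(b'show stat\n')
    chunks = []
    buf = client.recv(1024)
    while buf:
        chunks.append(buf)
        buf = client.recv(1024)
    client.close()
    # The first line starts with '# ', which csv.DictReader must not see.
    data = b''.join(chunks).decode('utf-8', 'replace').lstrip('# ')
    return list(csv.DictReader(data.splitlines()))

if __name__ == '__main__':
    for row in show_stat():
        if row['svname'] == 'BACKEND':
            print(row['pxname'], row['status'])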
-options:
-    name:
-        required: false
-        description:
-            - Name of the namespace
-        type: str
-    state:
-        required: false
-        default: "present"
-        choices: [ present, absent ]
-        description:
-            - Whether the namespace should exist
-        type: str
-'''

-EXAMPLES = '''
-- name: Create a namespace named mario
-  community.general.ip_netns:
-    name: mario
-    state: present
-
-- name: Delete a namespace named luigi
-  community.general.ip_netns:
-    name: luigi
-    state: absent
-'''
-
-RETURN = '''
-# Default return values
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_text
-
-
-class Namespace(object):
-    """Interface to network namespaces. """
-
-    def __init__(self, module):
-        self.module = module
-        self.name = module.params['name']
-        self.state = module.params['state']
-
-    def _netns(self, command):
-        '''Run ip netns command'''
-        return self.module.run_command(['ip', 'netns'] + command)
-
-    def exists(self):
-        '''Check if the namespace already exists'''
-        rc, out, err = self.module.run_command(['ip', 'netns', 'list'])
-        if rc != 0:
-            self.module.fail_json(msg=to_text(err))
-        return self.name in out
-
-    def add(self):
-        '''Create network namespace'''
-        rtc, out, err = self._netns(['add', self.name])
-
-        if rtc != 0:
-            self.module.fail_json(msg=err)
-
-    def delete(self):
-        '''Delete network namespace'''
-        rtc, out, err = self._netns(['del', self.name])
-        if rtc != 0:
-            self.module.fail_json(msg=err)
-
-    def check(self):
-        '''Run check mode'''
-        changed = False
-
-        # no change is needed when the namespace already matches the request
-        if self.state == 'absent' and self.exists():
-            changed = True
-        elif self.state == 'present' and not self.exists():
-            changed = True
-
-        self.module.exit_json(changed=changed)
-
-    def run(self):
-        '''Make the necessary changes'''
-        changed = False
-
-        if self.state == 'absent':
-            if self.exists():
-                self.delete()
-                changed = True
-        elif self.state == 'present':
-            if not self.exists():
-                self.add()
-                changed = True
-
-        self.module.exit_json(changed=changed)
-
-
-def main():
-    """Entry point."""
-    module = AnsibleModule(
-        argument_spec={
-            'name': {'default': None},
-            'state': {'default': 'present', 'choices': ['present', 'absent']},
-        },
-        supports_check_mode=True,
-    )
-
-    network_namespace = Namespace(module)
-    if module.check_mode:
-        network_namespace.check()
-    else:
-        network_namespace.run()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py b/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py
deleted file mode 100644
index 2ae0348c..00000000
--- a/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2015, René Moser
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: ipify_facts
-short_description: Retrieve the public IP of your internet gateway
-description:
-  - If behind NAT and need to know the public IP of your internet gateway.
-author:
-- René Moser (@resmo)
-options:
-  api_url:
-    description:
-      - URL of the ipify.org API service.
-      - C(?format=json) will be appended per default.
-    type: str
-    default: https://api.ipify.org/
-  timeout:
-    description:
-      - HTTP connection timeout in seconds.
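Outside of Ansible, the namespace existence test that Namespace.exists() above performs is a short subprocess call. A sketch, assuming the iproute2 ip binary is on PATH:

# Sketch: namespace existence check equivalent to Namespace.exists() above.
# Assumes the iproute2 'ip' binary is available on PATH.
import subprocess

def netns_exists(name):
    out = subprocess.run(['ip', 'netns', 'list'],
                         capture_output=True, text=True, check=True).stdout
    # 'ip netns list' prints one namespace per line, possibly with ' (id: N)'.
    return any(line.split()[0] == name
               for line in out.splitlines() if line.strip())

print(netns_exists('mario'))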
- type: int - default: 10 - validate_certs: - description: - - When set to C(NO), SSL certificates will not be validated. - type: bool - default: yes -notes: - - Visit https://www.ipify.org to get more information. -''' - -EXAMPLES = r''' -# Gather IP facts from ipify.org -- name: Get my public IP - community.general.ipify_facts: - -# Gather IP facts from your own ipify service endpoint with a custom timeout -- name: Get my public IP - community.general.ipify_facts: - api_url: http://api.example.com/ipify - timeout: 20 -''' - -RETURN = r''' ---- -ipify_public_ip: - description: Public IP of the internet gateway. - returned: success - type: str - sample: 1.2.3.4 -''' - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_text - - -class IpifyFacts(object): - - def __init__(self): - self.api_url = module.params.get('api_url') - self.timeout = module.params.get('timeout') - - def run(self): - result = { - 'ipify_public_ip': None - } - (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout) - - if not response: - module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout)) - - data = json.loads(to_text(response.read())) - result['ipify_public_ip'] = data.get('ip') - return result - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - api_url=dict(type='str', default='https://api.ipify.org/'), - timeout=dict(type='int', default=10), - validate_certs=dict(type='bool', default=True), - ), - supports_check_mode=True, - ) - - ipify_facts = IpifyFacts().run() - ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts) - module.exit_json(**ipify_facts_result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py b/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py deleted file mode 100644 index ee1d49f3..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Aleksei Kostiuk -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ipinfoio_facts -short_description: "Retrieve IP geolocation facts of a host's IP address" -description: - - "Gather IP geolocation facts of a host's IP address using ipinfo.io API" -author: "Aleksei Kostiuk (@akostyuk)" -options: - timeout: - description: - - HTTP connection timeout in seconds - required: false - default: 10 - type: int - http_agent: - description: - - Set http user agent - required: false - default: "ansible-ipinfoio-module/0.0.1" - type: str -notes: - - "Check http://ipinfo.io/ for more information" -''' - -EXAMPLES = ''' -# Retrieve geolocation data of a host's IP address -- name: Get IP geolocation data - community.general.ipinfoio_facts: -''' - -RETURN = ''' -ansible_facts: - description: "Dictionary of ip geolocation facts for a host's IP address" - returned: changed - type: complex - contains: - ip: - description: "Public IP address of a host" - type: str - sample: "8.8.8.8" - hostname: - description: Domain name - type: str - sample: 
"google-public-dns-a.google.com" - country: - description: ISO 3166-1 alpha-2 country code - type: str - sample: "US" - region: - description: State or province name - type: str - sample: "California" - city: - description: City name - type: str - sample: "Mountain View" - loc: - description: Latitude and Longitude of the location - type: str - sample: "37.3860,-122.0838" - org: - description: "organization's name" - type: str - sample: "AS3356 Level 3 Communications, Inc." - postal: - description: Postal code - type: str - sample: "94035" -''' -from ansible.module_utils.basic import AnsibleModule - -from ansible.module_utils.urls import fetch_url - - -USER_AGENT = 'ansible-ipinfoio-module/0.0.1' - - -class IpinfoioFacts(object): - - def __init__(self, module): - self.url = 'https://ipinfo.io/json' - self.timeout = module.params.get('timeout') - self.module = module - - def get_geo_data(self): - response, info = fetch_url(self.module, self.url, force=True, # NOQA - timeout=self.timeout) - try: - info['status'] == 200 - except AssertionError: - self.module.fail_json(msg='Could not get {0} page, ' - 'check for connectivity!'.format(self.url)) - else: - try: - content = response.read() - result = self.module.from_json(content.decode('utf8')) - except ValueError: - self.module.fail_json( - msg='Failed to parse the ipinfo.io response: ' - '{0} {1}'.format(self.url, content)) - else: - return result - - -def main(): - module = AnsibleModule( # NOQA - argument_spec=dict( - http_agent=dict(default=USER_AGENT), - timeout=dict(type='int', default=10), - ), - supports_check_mode=True, - ) - - ipinfoio = IpinfoioFacts(module) - ipinfoio_result = dict( - changed=False, ansible_facts=ipinfoio.get_geo_data()) - module.exit_json(**ipinfoio_result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py b/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py deleted file mode 100644 index 8a6122ed..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py +++ /dev/null @@ -1,349 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Christian Wollinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ipwcli_dns - -short_description: Manage DNS Records for Ericsson IPWorks via ipwcli - -version_added: '0.2.0' - -description: - - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records." - -requirements: - - ipwcli (installed on Ericsson IPWorks) - -notes: - - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. - -options: - dnsname: - description: - - Name of the record. - required: true - type: str - type: - description: - - Type of the record. - required: true - type: str - choices: [ NAPTR, SRV, A, AAAA ] - container: - description: - - Sets the container zone for the record. - required: true - type: str - address: - description: - - The IP address for the A or AAAA record. - - Required for C(type=A) or C(type=AAAA) - type: str - ttl: - description: - - Sets the TTL of the record. - type: int - default: 3600 - state: - description: - - Whether the record should exist or not. 
- type: str - choices: [ absent, present ] - default: present - priority: - description: - - Sets the priority of the SRV record. - type: int - default: 10 - weight: - description: - - Sets the weight of the SRV record. - type: int - default: 10 - port: - description: - - Sets the port of the SRV record. - - Required for C(type=SRV) - type: int - target: - description: - - Sets the target of the SRV record. - - Required for C(type=SRV) - type: str - order: - description: - - Sets the order of the NAPTR record. - - Required for C(type=NAPTR) - type: int - preference: - description: - - Sets the preference of the NAPTR record. - - Required for C(type=NAPTR) - type: int - flags: - description: - - Sets one of the possible flags of NAPTR record. - - Required for C(type=NAPTR) - type: str - choices: ['S', 'A', 'U', 'P'] - service: - description: - - Sets the service of the NAPTR record. - - Required for C(type=NAPTR) - type: str - replacement: - description: - - Sets the replacement of the NAPTR record. - - Required for C(type=NAPTR) - type: str - username: - description: - - Username to login on ipwcli. - type: str - required: true - password: - description: - - Password to login on ipwcli. - type: str - required: true - -author: - - Christian Wollinger (@cwollinger) -''' - -EXAMPLES = ''' -- name: Create A record - community.general.ipwcli_dns: - dnsname: example.com - type: A - container: ZoneOne - address: 127.0.0.1 - -- name: Remove SRV record if exists - community.general.ipwcli_dns: - dnsname: _sip._tcp.test.example.com - type: SRV - container: ZoneOne - ttl: 100 - state: absent - target: example.com - port: 5060 - -- name: Create NAPTR record - community.general.ipwcli_dns: - dnsname: test.example.com - type: NAPTR - preference: 10 - container: ZoneOne - ttl: 100 - order: 10 - service: 'SIP+D2T' - replacement: '_sip._tcp.test.example.com.' 
- flags: S -''' - -RETURN = ''' -record: - description: The created record from the input params - type: str - returned: always -''' - -from ansible.module_utils.basic import AnsibleModule -import os - - -class ResourceRecord(object): - - def __init__(self, module): - self.module = module - self.dnsname = module.params['dnsname'] - self.dnstype = module.params['type'] - self.container = module.params['container'] - self.address = module.params['address'] - self.ttl = module.params['ttl'] - self.state = module.params['state'] - self.priority = module.params['priority'] - self.weight = module.params['weight'] - self.port = module.params['port'] - self.target = module.params['target'] - self.order = module.params['order'] - self.preference = module.params['preference'] - self.flags = module.params['flags'] - self.service = module.params['service'] - self.replacement = module.params['replacement'] - self.user = module.params['username'] - self.password = module.params['password'] - - def create_naptrrecord(self): - # create NAPTR record with the given params - record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"' - % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement)) - return record - - def create_srvrecord(self): - # create SRV record with the given params - record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s' - % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target)) - return record - - def create_arecord(self): - # create A record with the given params - if self.dnstype == 'AAAA': - record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) - else: - record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) - - return record - - def list_record(self, record): - # check if the record exists via list on ipwcli - search = 'list %s' % (record.replace(';', '&&').replace('set', 'where')) - cmd = [ - self.module.get_bin_path('ipwcli', True), - '-user=%s' % self.user, - '-password=%s' % self.password, - ] - rc, out, err = self.module.run_command(cmd, data=search) - - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') - - if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or - ('NAPTRRecord %s' % self.dnsname in out and rc == 0)): - return True, rc, out, err - - return False, rc, out, err - - def deploy_record(self, record): - # check what happens if create fails on ipworks - stdin = 'create %s' % (record) - cmd = [ - self.module.get_bin_path('ipwcli', True), - '-user=%s' % self.user, - '-password=%s' % self.password, - ] - rc, out, err = self.module.run_command(cmd, data=stdin) - - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') - - if '1 object(s) created.' 
in out: - return rc, out, err - else: - self.module.fail_json(msg='record creation failed', stderr=out) - - def delete_record(self, record): - # check what happens if create fails on ipworks - stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where')) - cmd = [ - self.module.get_bin_path('ipwcli', True), - '-user=%s' % self.user, - '-password=%s' % self.password, - ] - rc, out, err = self.module.run_command(cmd, data=stdin) - - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') - - if '1 object(s) were updated.' in out: - return rc, out, err - else: - self.module.fail_json(msg='record deletion failed', stderr=out) - - -def run_module(): - # define available arguments/parameters a user can pass to the module - module_args = dict( - dnsname=dict(type='str', required=True), - type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']), - container=dict(type='str', required=True), - address=dict(type='str', required=False), - ttl=dict(type='int', required=False, default=3600), - state=dict(type='str', default='present', choices=['absent', 'present']), - priority=dict(type='int', required=False, default=10), - weight=dict(type='int', required=False, default=10), - port=dict(type='int', required=False), - target=dict(type='str', required=False), - order=dict(type='int', required=False), - preference=dict(type='int', required=False), - flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']), - service=dict(type='str', required=False), - replacement=dict(type='str', required=False), - username=dict(type='str', required=True), - password=dict(type='str', required=True, no_log=True) - ) - - # define result - result = dict( - changed=False, - stdout='', - stderr='', - rc=0, - record='' - ) - - # supports check mode - module = AnsibleModule( - argument_spec=module_args, - required_if=[ - ['type', 'A', ['address']], - ['type', 'AAAA', ['address']], - ['type', 'SRV', ['port', 'target']], - ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']], - ], - supports_check_mode=True - ) - - user = ResourceRecord(module) - - if user.dnstype == 'NAPTR': - record = user.create_naptrrecord() - elif user.dnstype == 'SRV': - record = user.create_srvrecord() - elif user.dnstype == 'A' or user.dnstype == 'AAAA': - record = user.create_arecord() - - found, rc, out, err = user.list_record(record) - - if found and user.state == 'absent': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = user.delete_record(record) - result['changed'] = True - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err - elif not found and user.state == 'present': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = user.deploy_record(record) - result['changed'] = True - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err - else: - result['changed'] = False - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err - - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/lldp.py b/ansible_collections/community/general/plugins/modules/net_tools/lldp.py deleted file mode 100644 index 1b8fa9eb..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/lldp.py +++ /dev/null @@ -1,80 +0,0 @@ 
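list_record() and delete_record() above both derive their lookup from the create command string by rewriting "-set k=v;..." into a "-where" filter. Isolated (the helper name is hypothetical):

# Sketch of the ipwcli command rewriting used by list_record()/delete_record().
# Note it rewrites every occurrence of 'set', not only the '-set' flag, which
# is a latent quirk of the original approach.
def to_query(record):
    """Turn a 'create' style record string into an ipwcli list filter."""
    return 'list %s' % record.replace(';', '&&').replace('set', 'where')

print(to_query('arecord test.example.com 127.0.0.1 -set ttl=3600;container=ZoneOne'))
# list arecord test.example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne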
-#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lldp -requirements: [ lldpctl ] -short_description: get details reported by lldp -description: - - Reads data out of lldpctl -options: {} -author: "Andy Hill (@andyhky)" -notes: - - Requires lldpd running and lldp enabled on switches -''' - -EXAMPLES = ''' -# Retrieve switch/port information - - name: Gather information from lldp - community.general.lldp: - - - name: Print each switch/port - ansible.builtin.debug: - msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}" - with_items: "{{ lldp.keys() }}" - -# TASK: [Print each switch/port] *********************************************************** -# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} -# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} -# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} - -''' - -from ansible.module_utils.basic import AnsibleModule - - -def gather_lldp(module): - cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue'] - rc, output, err = module.run_command(cmd) - if output: - output_dict = {} - current_dict = {} - lldp_entries = output.split("\n") - - for entry in lldp_entries: - if entry.startswith('lldp'): - path, value = entry.strip().split("=", 1) - path = path.split(".") - path_components, final = path[:-1], path[-1] - else: - value = current_dict[final] + '\n' + entry - - current_dict = output_dict - for path_component in path_components: - current_dict[path_component] = current_dict.get(path_component, {}) - current_dict = current_dict[path_component] - current_dict[final] = value - return output_dict - - -def main(): - module = AnsibleModule({}) - - lldp_output = gather_lldp(module) - try: - data = {'lldp': lldp_output['lldp']} - module.exit_json(ansible_facts=data) - except TypeError: - module.fail_json(msg="lldpctl command failed. 
is lldpd running?") - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py b/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py deleted file mode 100644 index 5ec5cbb2..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py +++ /dev/null @@ -1,268 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2018 Nicolai Buchwitz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: netcup_dns -notes: [] -short_description: manage Netcup DNS records -description: - - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)" -options: - api_key: - description: - - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net)) - required: True - type: str - api_password: - description: - - API password for authentication, must be obtained via the netcup CCP (https://ccp.netcup.net) - required: True - type: str - customer_id: - description: - - Netcup customer id - required: True - type: int - domain: - description: - - Domainname the records should be added / removed - required: True - type: str - record: - description: - - Record to add or delete, supports wildcard (*). Default is C(@) (e.g. the zone name) - default: "@" - aliases: [ name ] - type: str - type: - description: - - Record type - choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS'] - required: True - type: str - value: - description: - - Record value - required: true - type: str - solo: - type: bool - default: False - description: - - Whether the record should be the only one for that record type and record name. Only use with C(state=present) - - This will delete all other records with the same record name and type. - priority: - description: - - Record priority. Required for C(type=MX) - required: False - type: int - state: - description: - - Whether the record should exist or not - required: False - default: present - choices: [ 'present', 'absent' ] - type: str -requirements: - - "nc-dnsapi >= 0.1.3" -author: "Nicolai Buchwitz (@nbuchwitz)" - -''' - -EXAMPLES = ''' -- name: Create a record of type A - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." - domain: "example.com" - name: "mail" - type: "A" - value: "127.0.0.1" - -- name: Delete that record - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." - domain: "example.com" - name: "mail" - type: "A" - value: "127.0.0.1" - state: absent - -- name: Create a wildcard record - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." - domain: "example.com" - name: "*" - type: "A" - value: "127.0.1.1" - -- name: Set the MX record for example.com - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." - domain: "example.com" - type: "MX" - value: "mail.example.com" - -- name: Set a record and ensure that this is the only one - community.general.netcup_dns: - api_key: "..." - api_password: "..." - customer_id: "..." 
-    name: "demo"
-    domain: "example.com"
-    type: "AAAA"
-    value: "::1"
-    solo: true
-'''
-
-RETURN = '''
-records:
-    description: list containing all records
-    returned: success
-    type: complex
-    contains:
-        name:
-            description: the record name
-            returned: success
-            type: str
-            sample: fancy-hostname
-        type:
-            description: the record type
-            returned: success
-            type: str
-            sample: A
-        value:
-            description: the record destination
-            returned: success
-            type: str
-            sample: 127.0.0.1
-        priority:
-            description: the record priority (only relevant if type=MX)
-            returned: success
-            type: int
-            sample: 0
-        id:
-            description: internal id of the record
-            returned: success
-            type: int
-            sample: 12345
-'''
-
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-NCDNSAPI_IMP_ERR = None
-try:
-    import nc_dnsapi
-    from nc_dnsapi import DNSRecord
-
-    HAS_NCDNSAPI = True
-except ImportError:
-    NCDNSAPI_IMP_ERR = traceback.format_exc()
-    HAS_NCDNSAPI = False
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            api_key=dict(required=True, no_log=True),
-            api_password=dict(required=True, no_log=True),
-            customer_id=dict(required=True, type='int'),
-
-            domain=dict(required=True),
-            record=dict(required=False, default='@', aliases=['name']),
-            type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
-            value=dict(required=True),
-            priority=dict(required=False, type='int'),
-            solo=dict(required=False, type='bool', default=False),
-            state=dict(required=False, choices=['present', 'absent'], default='present'),
-
-        ),
-        supports_check_mode=True
-    )
-
-    if not HAS_NCDNSAPI:
-        module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR)
-
-    api_key = module.params.get('api_key')
-    api_password = module.params.get('api_password')
-    customer_id = module.params.get('customer_id')
-    domain = module.params.get('domain')
-    record_type = module.params.get('type')
-    record = module.params.get('record')
-    value = module.params.get('value')
-    priority = module.params.get('priority')
-    solo = module.params.get('solo')
-    state = module.params.get('state')
-
-    if record_type == 'MX' and not priority:
-        module.fail_json(msg="record type MX requires the 'priority' argument")
-
-    has_changed = False
-    all_records = []
-    try:
-        with nc_dnsapi.Client(customer_id, api_key, api_password) as api:
-            all_records = api.dns_records(domain)
-            record = DNSRecord(record, record_type, value, priority=priority)
-
-            # try to get existing record
-            record_exists = False
-            for r in all_records:
-                if r == record:
-                    record_exists = True
-                    record = r
-
-                    break
-
-            if state == 'present':
-                if solo:
-                    obsolete_records = [r for r in all_records if
-                                        r.hostname == record.hostname
-                                        and r.type == record.type
-                                        and not r.destination == record.destination]
-
-                    if obsolete_records:
-                        if not module.check_mode:
-                            all_records = api.delete_dns_records(domain, obsolete_records)
-
-                        has_changed = True
-
-                if not record_exists:
-                    if not module.check_mode:
-                        all_records = api.add_dns_record(domain, record)
-
-                    has_changed = True
-            elif state == 'absent' and record_exists:
-                if not module.check_mode:
-                    all_records = api.delete_dns_record(domain, record)
-
-                has_changed = True
-
-    except Exception as ex:
-        module.fail_json(msg=str(ex))
-
-    module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
-
-
-def record_data(r):
-    return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id}
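Idempotency in the netcup_dns main() above leans on nc_dnsapi.DNSRecord equality to find an existing entry, and on the solo branch to select obsolete records. A library-free sketch of that selection, with field names following record_data():

# Sketch of the 'solo' selection above: keep the desired record, flag every
# other record with the same name and type for deletion.
def obsolete(all_records, desired):
    return [r for r in all_records
            if r['hostname'] == desired['hostname']
            and r['type'] == desired['type']
            and r['destination'] != desired['destination']]

records = [{'hostname': 'demo', 'type': 'AAAA', 'destination': '::1'},
           {'hostname': 'demo', 'type': 'AAAA', 'destination': '::2'}]
print(obsolete(records, records[0]))  # -> the '::2' entry only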
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py b/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py
deleted file mode 100644
index 0a2f113c..00000000
--- a/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py
+++ /dev/null
@@ -1,2105 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Chris Long
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: nmcli
-author:
-- Chris Long (@alcamie101)
-short_description: Manage Networking
-requirements:
-- nmcli
-description:
-    - 'Manage the network devices. Create, modify and manage various connection and device type e.g., ethernet, teams, bonds, vlans etc.'
-    - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.'
-    - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.'
-    - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager'
-    - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.'
-options:
-    state:
-        description:
-            - Whether the device should exist or not, taking action if the state is different from what is stated.
-        type: str
-        required: true
-        choices: [ absent, present ]
-    autoconnect:
-        description:
-            - Whether the connection should start on boot.
-            - Whether the connection profile can be automatically activated
-        type: bool
-        default: yes
-    conn_name:
-        description:
-            - The name used to call the connection. Pattern is <type>[-<ifname>][-<num>].
-        type: str
-        required: true
-    ifname:
-        description:
-            - The interface to bind the connection to.
-            - The connection will only be applicable to this interface name.
-            - A special value of C('*') can be used for interface-independent connections.
-            - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
-            - This parameter defaults to C(conn_name) when left unset.
-        type: str
-    type:
-        description:
-            - This is the type of device or network connection that you wish to create or modify.
-            - Type C(dummy) is added in community.general 3.5.0.
-            - Type C(generic) is added in Ansible 2.5.
-            - Type C(infiniband) is added in community.general 2.0.0.
-            - Type C(gsm) is added in community.general 3.7.0.
-            - Type C(wireguard) is added in community.general 4.3.0
-        type: str
-        choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm,
-            wireguard ]
-    mode:
-        description:
-            - This is the type of device or network connection that you wish to create for a bond or bridge.
-        type: str
-        choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
-        default: balance-rr
-    master:
-        description:
-            - Master <master (ifname, or connection UUID or conn_name) of bridge, team, bond master connection profile.
-        type: str
-    forwarddelay:
-        description:
-            - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
-        type: int
-        default: 15
-    hellotime:
-        description:
-            - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
-        type: int
-        default: 2
-    maxage:
-        description:
-            - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
- type: int - default: 20 - ageingtime: - description: - - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds. - type: int - default: 300 - mac: - description: - - MAC address of the connection. - - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel. - type: str - slavepriority: - description: - - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave. - type: int - default: 32 - path_cost: - description: - - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave. - type: int - default: 100 - hairpin: - description: - - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the - frame was received on. - type: bool - default: yes - runner: - description: - - This is the type of device or network connection that you wish to create for a team. - type: str - choices: [ broadcast, roundrobin, activebackup, loadbalance, lacp ] - default: roundrobin - version_added: 3.4.0 - runner_hwaddr_policy: - description: - - This defines the policy of how hardware addresses of team device and port devices - should be set during the team lifetime. - type: str - choices: [ same_all, by_active, only_active ] - version_added: 3.4.0 - vlanid: - description: - - This is only used with VLAN - VLAN ID in range <0-4095>. - type: int - vlandev: - description: - - This is only used with VLAN - parent device this VLAN is on, can use ifname. - type: str - flags: - description: - - This is only used with VLAN - flags. - type: str - ingress: - description: - - This is only used with VLAN - VLAN ingress priority mapping. - type: str - egress: - description: - - This is only used with VLAN - VLAN egress priority mapping. - type: str - vxlan_id: - description: - - This is only used with VXLAN - VXLAN ID. - type: int - vxlan_remote: - description: - - This is only used with VXLAN - VXLAN destination IP address. - type: str - vxlan_local: - description: - - This is only used with VXLAN - VXLAN local IP address. - type: str - ip_tunnel_dev: - description: - - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. - type: str - ip_tunnel_remote: - description: - - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. - type: str - ip_tunnel_local: - description: - - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. - type: str - ip_tunnel_input_key: - description: - - The key used for tunnel input packets. - - Only used when I(type=gre). - type: str - version_added: 3.6.0 - ip_tunnel_output_key: - description: - - The key used for tunnel output packets. - - Only used when I(type=gre). - type: str - version_added: 3.6.0 - zone: - description: - - The trust level of the connection. - - When updating this property on a currently activated connection, the change takes effect immediately. - type: str - version_added: 2.0.0 - wifi_sec: - description: - - The security configuration of the WiFi connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' - - 'For instance to use common WPA-PSK auth with a password: - C({key-mgmt: wpa-psk, psk: my_password}).' 
- type: dict - suboptions: - auth-alg: - description: - - When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here. - - One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP. - - When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties - must be specified. - type: str - choices: [ open, shared, leap ] - fils: - description: - - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. - - One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3) - (enable FILS and fail if not supported). - - When set to C(0) and no global default is set, FILS will be optionally enabled. - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - group: - description: - - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in - the list. - - For maximum compatibility leave this property empty. - type: list - elements: str - choices: [ wep40, wep104, tkip, ccmp ] - key-mgmt: - description: - - Key management used for the connection. - - One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2 - + WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only). - - This property must be set for any Wi-Fi connection that uses security. - type: str - choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ] - leap-password-flags: - description: Flags indicating how to handle the I(leap-password) property. - type: list - elements: int - leap-password: - description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). - type: str - leap-username: - description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). - type: str - pairwise: - description: - - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the - list. - - For maximum compatibility leave this property empty. - type: list - elements: str - choices: [ tkip, ccmp ] - pmf: - description: - - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection. - - One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3) - (enable PMF and fail if not supported). - - When set to C(0) and no global default is set, PMF will be optionally enabled. - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - proto: - description: - - List of strings specifying the allowed WPA protocol versions to use. - - Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN). - - If not specified, both WPA and RSN connections are allowed. - type: list - elements: str - choices: [ wpa, rsn ] - psk-flags: - description: Flags indicating how to handle the I(psk) property. - type: list - elements: int - psk: - description: - - Pre-Shared-Key for WPA networks. - - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the - actual key, or the key in form of 64 hexadecimal character. 
- - The WPA3-Personal networks use a passphrase of any length for SAE authentication. - type: str - wep-key-flags: - description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties. - type: list - elements: int - wep-key-type: - description: - - Controls the interpretation of WEP keys. - - Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII - password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the - actual WEP key. - type: int - choices: [ 1, 2 ] - wep-key0: - description: - - Index 0 WEP key. This is the WEP key used in most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-key1: - description: - - Index 1 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-key2: - description: - - Index 2 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-key3: - description: - - Index 3 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-tx-keyidx: - description: - - When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here. - - Valid values are C(0) (default key) through C(3). - - Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4). - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - wps-method: - description: - - Flags indicating which mode of WPS is to be used if any. - - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS - enrollment from the Access Point capabilities. - - WPS can be disabled by setting this property to a value of C(1). - type: int - default: 0 - version_added: 3.0.0 - ssid: - description: - - Name of the Wireless router or the access point. - type: str - version_added: 3.0.0 - wifi: - description: - - The configuration of the WiFi connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' - - 'For instance to create a hidden AP mode WiFi connection: - C({hidden: true, mode: ap}).' - type: dict - suboptions: - ap-isolation: - description: - - Configures AP isolation, which prevents communication between wireless devices connected to this AP. - - This property can be set to a value different from C(-1) only when the interface is configured in AP mode. - - If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks - from other clients in the network. At the same time, it prevents devices to access resources on the same wireless networks as file - shares, printers, etc. - - If set to C(0), devices can talk to each other. - - When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0). 
- type: int - choices: [ -1, 0, 1 ] - default: -1 - assigned-mac-address: - description: - - The new field for the cloned MAC address. - - It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or - C(stable). - - This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses. - - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address). - type: str - band: - description: - - 802.11 frequency band of the network. - - One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11. - - This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not - associate with the same network in the 2.4GHz band even if the network's settings are compatible. - - This setting depends on specific driver capability and may not work with all drivers. - type: str - choices: [ a, bg ] - bssid: - description: - - If specified, directs the device to only associate with the given access point. - - This capability is highly driver dependent and not supported by all devices. - - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future. - type: str - channel: - description: - - Wireless channel to use for the Wi-Fi connection. - - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel. - - Because channel numbers overlap between bands, this property also requires the I(band) property to be set. - type: int - default: 0 - cloned-mac-address: - description: - - This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like - C(random). - - For libnm and nmcli, this field is called I(cloned-mac-address). - type: str - generate-mac-address-mask: - description: - - With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a - locally-administered, unicast MAC address is created. This property allows to specify that certain bits are fixed. - - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address. - - If the property is C(null), it is eligible to be overwritten by a default connection setting. - - If the value is still c(null) or an empty string, the default is to create a locally-administered, unicast MAC address. - - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC - address of the device, while the unset bits are subject to randomization. - - Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the - C(random) or C(stable) algorithm. - - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits - that shall not be randomized. - - For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are - randomized. - - A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address. - - If the value contains more than one additional MAC addresses, one of them is chosen randomly. 
For example, - C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally - administered. - type: str - hidden: - description: - - If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode. - - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID. - However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with - caution. - - In AP mode, the created network does not broadcast its SSID. - - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the - explicit probe-scans are distinctly recognizable on the air. - type: bool - default: false - mac-address-blacklist: - description: - - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply. - - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)). - type: list - elements: str - mac-address-randomization: - description: - - One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1) - (never randomize the MAC address), or C(2) (always randomize the MAC address). - - This property is deprecated for I(cloned-mac-address). - type: int - default: 0 - choices: [ 0, 1, 2 ] - mac-address: - description: - - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches. - - This property does not change the MAC address of the device (for example for MAC spoofing). - type: str - mode: - description: Wi-Fi network mode. If blank, C(infrastructure) is assumed. - type: str - choices: [ infrastructure, mesh, adhoc, ap ] - default: infrastructure - mtu: - description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. - type: int - default: 0 - powersave: - description: - - One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (don't touch currently configure setting) or C(0) (use - the globally configured value). - - All other values are reserved. - type: int - default: 0 - choices: [ 0, 1, 2, 3 ] - rate: - description: - - If non-zero, directs the device to only use the specified bitrate for communication with the access point. - - Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s. - - This property is highly driver dependent and not all devices support setting a static bitrate. - type: int - default: 0 - tx-power: - description: - - If non-zero, directs the device to use the specified transmit power. - - Units are dBm. - - This property is highly driver dependent and not all devices support setting a static transmit power. - type: int - default: 0 - wake-on-wlan: - description: - - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options. 
- - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values - C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager). - - Note the option values' sum must be specified in order to combine multiple options. - type: int - default: 1 - version_added: 3.5.0 - ignore_unsupported_suboptions: - description: - - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host. - - Only I(wifi) and I(wifi_sec) options are currently affected. - type: bool - default: false - version_added: 3.6.0 - gsm: - description: - - The configuration of the GSM connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).' - - 'For instance to use apn, pin, username and password: - C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).' - type: dict - version_added: 3.7.0 - suboptions: - apn: - description: - - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network. - - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or - just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan. - - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9. - type: str - auto-config: - description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network - the modem will register to in the Mobile Broadband Provider database. - type: bool - default: false - device-id: - description: - - The device unique identifier (as given by the C(WWAN) management service) which this connection applies to. - - If given, the connection will only apply to the specified device. - type: str - home-only: - description: - - When C(true), only connections to the home network will be allowed. - - Connections to roaming networks will not be made. - type: bool - default: false - mtu: - description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. - type: int - default: 0 - network-id: - description: - - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration. - - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network. - - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible. - type: str - number: - description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems. - type: str - password: - description: - - The password used to authenticate with the network, if required. 
- - Many providers do not require a password, or accept any password. - - But if a password is required, it is specified here. - type: str - password-flags: - description: - - NMSettingSecretFlags indicating how to handle the I(password) property. - - 'Following choices are allowed: - C(0) B(NONE): The system is responsible for providing and storing this secret (default), - C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be - asked to retrieve it - C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed - C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required - (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.' - type: int - choices: [ 0, 1, 2 , 4 ] - default: 0 - pin: - description: - - If the SIM is locked with a PIN it must be unlocked before any other operations are requested. - - Specify the PIN here to allow operation of the device. - type: str - pin-flags: - description: - - NMSettingSecretFlags indicating how to handle the I(gsm.pin) property. - - See I(gsm.password-flags) for NMSettingSecretFlags choices. - type: int - choices: [ 0, 1, 2 , 4 ] - default: 0 - sim-id: - description: - - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to. - - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching - the given identifier.' - type: str - sim-operator-id: - description: - - A MCC/MNC string like C(310260) or C(21601I) identifying the specific mobile network operator which this connection applies to. - - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card - provisioned by the given operator.' - type: str - username: - description: - - The username used to authenticate with the network, if required. - - Many providers do not require a username, or accept any username. - - But if a username is required, it is specified here. - wireguard: - description: - - The configuration of the Wireguard connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).' - - 'For instance to configure a listen port: - C({listen-port: 12345}).' - type: dict - version_added: 4.3.0 - suboptions: - fwmark: - description: - - The 32-bit fwmark for outgoing packets. - - The use of fwmark is optional and is by default off. Setting it to 0 disables it. - - Note that I(wireguard.ip4-auto-default-route) or I(wireguard.ip6-auto-default-route) enabled, implies to automatically choose a fwmark. - type: int - ip4-auto-default-route: - description: - - Whether to enable special handling of the IPv4 default route. - - If enabled, the IPv4 default route from I(wireguard.peer-routes) will be placed to a dedicated routing-table and two policy - routing rules will be added. - - The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table is chosen - automatically. 
This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved Rule-based Routing" - type: bool - ip6-auto-default-route: - description: - - Like I(wireguard.ip4-auto-default-route), but for the IPv6 default route. - type: bool - listen-port: - description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the - interface comes up. - type: int - mtu: - description: - - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple fragments. - - If zero a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the current routes - at the time of activation. - type: int - peer-routes: - description: - - Whether to automatically add routes for the AllowedIPs ranges of the peers. - - If C(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and - C(ipv6.route-table). Usually you want this automatism enabled. - - If C(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes) - and C(ipv6.routes), respectively. - - Note that if the peer's AllowedIPs is C(0.0.0.0/0) or C(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default) - setting is enabled, the peer route for this peer won't be added automatically. - type: bool - private-key: - description: The 256 bit private-key in base64 encoding. - type: str - private-key-flags: - description: C(NMSettingSecretFlags) indicating how to handle the I(wireguard.private-key) property. - type: int - choices: [ 0, 1, 2 ] -''' - -EXAMPLES = r''' -# These examples are using the following inventory: -# -# ## Directory layout: -# -# |_/inventory/cloud-hosts -# | /group_vars/openstack-stage.yml -# | /host_vars/controller-01.openstack.host.com -# | /host_vars/controller-02.openstack.host.com -# |_/playbook/library/nmcli.py -# | /playbook-add.yml -# | /playbook-del.yml -# ``` -# -# ## inventory examples -# ### groups_vars -# ```yml -# --- -# #devops_os_define_network -# storage_gw: "192.0.2.254" -# external_gw: "198.51.100.254" -# tenant_gw: "203.0.113.254" -# -# #Team vars -# nmcli_team: -# - conn_name: tenant -# ip4: '{{ tenant_ip }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: external -# ip4: '{{ external_ip }}' -# gw4: '{{ external_gw }}' -# - conn_name: storage -# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# nmcli_team_slave: -# - conn_name: em1 -# ifname: em1 -# master: tenant -# - conn_name: em2 -# ifname: em2 -# master: tenant -# - conn_name: p2p1 -# ifname: p2p1 -# master: storage -# - conn_name: p2p2 -# ifname: p2p2 -# master: external -# -# #bond vars -# nmcli_bond: -# - conn_name: tenant -# ip4: '{{ tenant_ip }}' -# gw4: '' -# mode: balance-rr -# - conn_name: external -# ip4: '{{ external_ip }}' -# gw4: '' -# mode: balance-rr -# - conn_name: storage -# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# mode: balance-rr -# nmcli_bond_slave: -# - conn_name: em1 -# ifname: em1 -# master: tenant -# - conn_name: em2 -# ifname: em2 -# master: tenant -# - conn_name: p2p1 -# ifname: p2p1 -# master: storage -# - conn_name: p2p2 -# ifname: p2p2 -# master: external -# -# #ethernet vars -# nmcli_ethernet: -# - conn_name: em1 -# ifname: em1 -# ip4: -# - '{{ tenant_ip }}' -# - '{{ second_tenant_ip }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: em2 -# ifname: em2 -# ip4: '{{ tenant_ip1 }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: p2p1 -# ifname: p2p1 
-# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# - conn_name: p2p2 -# ifname: p2p2 -# ip4: '{{ external_ip }}' -# gw4: '{{ external_gw }}' -# ``` -# -# ### host_vars -# ```yml -# --- -# storage_ip: "192.0.2.91/23" -# external_ip: "198.51.100.23/21" -# tenant_ip: "203.0.113.77/23" -# second_tenant_ip: "204.0.113.77/23" -# ``` - - - -## playbook-add.yml example - ---- -- hosts: openstack-stage - remote_user: root - tasks: - - - name: Install needed network manager libs - ansible.builtin.package: - name: - - NetworkManager-libnm - - nm-connection-editor - - libsemanage-python - - policycoreutils-python - state: present - -##### Working with all cloud nodes - Teaming - - name: Try nmcli add team - conn_name only & ip4 gw4 - community.general.nmcli: - type: team - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - state: present - with_items: - - '{{ nmcli_team }}' - - - name: Try nmcli add teams-slave - community.general.nmcli: - type: team-slave - conn_name: '{{ item.conn_name }}' - ifname: '{{ item.ifname }}' - master: '{{ item.master }}' - state: present - with_items: - - '{{ nmcli_team_slave }}' - -###### Working with all cloud nodes - Bonding - - name: Try nmcli add bond - conn_name only & ip4 gw4 mode - community.general.nmcli: - type: bond - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - mode: '{{ item.mode }}' - state: present - with_items: - - '{{ nmcli_bond }}' - - - name: Try nmcli add bond-slave - community.general.nmcli: - type: bond-slave - conn_name: '{{ item.conn_name }}' - ifname: '{{ item.ifname }}' - master: '{{ item.master }}' - state: present - with_items: - - '{{ nmcli_bond_slave }}' - -##### Working with all cloud nodes - Ethernet - - name: Try nmcli add Ethernet - conn_name only & ip4 gw4 - community.general.nmcli: - type: ethernet - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - state: present - with_items: - - '{{ nmcli_ethernet }}' - -## playbook-del.yml example -- hosts: openstack-stage - remote_user: root - tasks: - - - name: Try nmcli del team - multiple - community.general.nmcli: - conn_name: '{{ item.conn_name }}' - state: absent - with_items: - - conn_name: em1 - - conn_name: em2 - - conn_name: p1p1 - - conn_name: p1p2 - - conn_name: p2p1 - - conn_name: p2p2 - - conn_name: tenant - - conn_name: storage - - conn_name: external - - conn_name: team-em1 - - conn_name: team-em2 - - conn_name: team-p1p1 - - conn_name: team-p1p2 - - conn_name: team-p2p1 - - conn_name: team-p2p2 - - - name: Add an Ethernet connection with static IP configuration - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - state: present - - - name: Add an Team connection with static IP configuration - community.general.nmcli: - conn_name: my-team1 - ifname: my-team1 - type: team - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - state: present - autoconnect: yes - - - name: Optionally, at the same time specify IPv6 addresses for the device - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - ip6: 2001:db8::cafe - gw6: 2001:db8::1 - state: present - - - name: Add two IPv4 DNS server addresses - community.general.nmcli: - conn_name: my-eth1 - type: ethernet - dns4: - - 192.0.2.53 - - 198.51.100.53 - state: present - - - name: Make a profile usable for all compatible Ethernet interfaces - community.general.nmcli: - ctype: ethernet - name: my-eth1 - ifname: '*' 
- state: present - - - name: Change the property of a setting e.g. MTU - community.general.nmcli: - conn_name: my-eth1 - mtu: 9000 - type: ethernet - state: present - - - name: Add second ip4 address - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip4: - - 192.0.2.100/24 - - 192.0.3.100/24 - state: present - - - name: Add second ip6 address - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip6: - - 2001:db8::cafe - - 2002:db8::cafe - state: present - - - name: Add VxLan - community.general.nmcli: - type: vxlan - conn_name: vxlan_test1 - vxlan_id: 16 - vxlan_local: 192.168.1.2 - vxlan_remote: 192.168.1.5 - - - name: Add gre - community.general.nmcli: - type: gre - conn_name: gre_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add ipip - community.general.nmcli: - type: ipip - conn_name: ipip_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add sit - community.general.nmcli: - type: sit - conn_name: sit_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add zone - community.general.nmcli: - type: ethernet - conn_name: my-eth1 - zone: external - state: present - -# nmcli exits with status 0 if it succeeds and exits with a status greater -# than zero when there is a failure. The following list of status codes may be -# returned: -# -# - 0 Success - indicates the operation succeeded -# - 1 Unknown or unspecified error -# - 2 Invalid user input, wrong nmcli invocation -# - 3 Timeout expired (see --wait option) -# - 4 Connection activation failed -# - 5 Connection deactivation failed -# - 6 Disconnecting device failed -# - 7 Connection deletion failed -# - 8 NetworkManager is not running -# - 9 nmcli and NetworkManager versions mismatch -# - 10 Connection, device, or access point does not exist. - -- name: Create the wifi connection - community.general.nmcli: - type: wifi - conn_name: Brittany - ifname: wlp4s0 - ssid: Brittany - wifi_sec: - key-mgmt: wpa-psk - psk: my_password - autoconnect: true - state: present - -- name: Create a hidden AP mode wifi connection - community.general.nmcli: - type: wifi - conn_name: ChocoMaster - ifname: wlo1 - ssid: ChocoMaster - wifi: - hidden: true - mode: ap - autoconnect: true - state: present - -- name: Create a gsm connection - community.general.nmcli: - type: gsm - conn_name: my-gsm-provider - ifname: cdc-wdm0 - gsm: - apn: my.provider.apn - username: my-provider-username - password: my-provider-password - pin: my-sim-pin - autoconnect: true - state: present - -- name: Create a wireguard connection - community.general.nmcli: - type: wireguard - conn_name: my-wg-provider - ifname: mywg0 - wireguard: - listen-port: 51820 - private-key: my-private-key - autoconnect: true - state: present - -''' - -RETURN = r"""# -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text -import re - - -class NmcliModuleError(Exception): - pass - - -class Nmcli(object): - """ - This is the generic nmcli manipulation class that is subclassed based on platform. - A subclass may wish to override the following action methods:- - - create_connection() - - delete_connection() - - edit_connection() - - modify_connection() - - show_connection() - - up_connection() - - down_connection() - All subclasses MUST define platform and distribution (which may be None). 
- """ - - platform = 'Generic' - distribution = None - - SECRET_OPTIONS = ( - '802-11-wireless-security.leap-password', - '802-11-wireless-security.psk', - '802-11-wireless-security.wep-key0', - '802-11-wireless-security.wep-key1', - '802-11-wireless-security.wep-key2', - '802-11-wireless-security.wep-key3' - ) - - def __init__(self, module): - self.module = module - self.state = module.params['state'] - self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] - self.autoconnect = module.params['autoconnect'] - self.conn_name = module.params['conn_name'] - self.master = module.params['master'] - self.ifname = module.params['ifname'] - self.type = module.params['type'] - self.ip4 = module.params['ip4'] - self.gw4 = module.params['gw4'] - self.gw4_ignore_auto = module.params['gw4_ignore_auto'] - self.routes4 = module.params['routes4'] - self.route_metric4 = module.params['route_metric4'] - self.routing_rules4 = module.params['routing_rules4'] - self.never_default4 = module.params['never_default4'] - self.dns4 = module.params['dns4'] - self.dns4_search = module.params['dns4_search'] - self.dns4_ignore_auto = module.params['dns4_ignore_auto'] - self.method4 = module.params['method4'] - self.may_fail4 = module.params['may_fail4'] - self.ip6 = module.params['ip6'] - self.gw6 = module.params['gw6'] - self.gw6_ignore_auto = module.params['gw6_ignore_auto'] - self.routes6 = module.params['routes6'] - self.route_metric6 = module.params['route_metric6'] - self.dns6 = module.params['dns6'] - self.dns6_search = module.params['dns6_search'] - self.dns6_ignore_auto = module.params['dns6_ignore_auto'] - self.method6 = module.params['method6'] - self.ip_privacy6 = module.params['ip_privacy6'] - self.addr_gen_mode6 = module.params['addr_gen_mode6'] - self.mtu = module.params['mtu'] - self.stp = module.params['stp'] - self.priority = module.params['priority'] - self.mode = module.params['mode'] - self.miimon = module.params['miimon'] - self.primary = module.params['primary'] - self.downdelay = module.params['downdelay'] - self.updelay = module.params['updelay'] - self.arp_interval = module.params['arp_interval'] - self.arp_ip_target = module.params['arp_ip_target'] - self.slavepriority = module.params['slavepriority'] - self.forwarddelay = module.params['forwarddelay'] - self.hellotime = module.params['hellotime'] - self.maxage = module.params['maxage'] - self.ageingtime = module.params['ageingtime'] - self.hairpin = module.params['hairpin'] - self.path_cost = module.params['path_cost'] - self.mac = module.params['mac'] - self.runner = module.params['runner'] - self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] - self.vlanid = module.params['vlanid'] - self.vlandev = module.params['vlandev'] - self.flags = module.params['flags'] - self.ingress = module.params['ingress'] - self.egress = module.params['egress'] - self.vxlan_id = module.params['vxlan_id'] - self.vxlan_local = module.params['vxlan_local'] - self.vxlan_remote = module.params['vxlan_remote'] - self.ip_tunnel_dev = module.params['ip_tunnel_dev'] - self.ip_tunnel_local = module.params['ip_tunnel_local'] - self.ip_tunnel_remote = module.params['ip_tunnel_remote'] - self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] - self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] - self.nmcli_bin = self.module.get_bin_path('nmcli', True) - self.dhcp_client_id = module.params['dhcp_client_id'] - self.zone = module.params['zone'] - self.ssid = module.params['ssid'] - self.wifi = 
module.params['wifi'] - self.wifi_sec = module.params['wifi_sec'] - self.gsm = module.params['gsm'] - self.wireguard = module.params['wireguard'] - - if self.method4: - self.ipv4_method = self.method4 - elif self.type in ('dummy', 'wireguard') and not self.ip4: - self.ipv4_method = 'disabled' - elif self.ip4: - self.ipv4_method = 'manual' - else: - self.ipv4_method = None - - if self.method6: - self.ipv6_method = self.method6 - elif self.type in ('dummy', 'wireguard') and not self.ip6: - self.ipv6_method = 'disabled' - elif self.ip6: - self.ipv6_method = 'manual' - else: - self.ipv6_method = None - - self.edit_commands = [] - - def execute_command(self, cmd, use_unsafe_shell=False, data=None): - if isinstance(cmd, list): - cmd = [to_text(item) for item in cmd] - else: - cmd = to_text(cmd) - return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) - - def execute_edit_commands(self, commands, arguments): - arguments = arguments or [] - cmd = [self.nmcli_bin, 'con', 'edit'] + arguments - data = "\n".join(commands) - return self.execute_command(cmd, data=data) - - def connection_options(self, detect_change=False): - # Options common to multiple connection types. - options = { - 'connection.autoconnect': self.autoconnect, - 'connection.zone': self.zone, - } - - # IP address options. - if self.ip_conn_type and not self.master: - options.update({ - 'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4), - 'ipv4.dhcp-client-id': self.dhcp_client_id, - 'ipv4.dns': self.dns4, - 'ipv4.dns-search': self.dns4_search, - 'ipv4.ignore-auto-dns': self.dns4_ignore_auto, - 'ipv4.gateway': self.gw4, - 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, - 'ipv4.routes': self.routes4, - 'ipv4.route-metric': self.route_metric4, - 'ipv4.routing-rules': self.routing_rules4, - 'ipv4.never-default': self.never_default4, - 'ipv4.method': self.ipv4_method, - 'ipv4.may-fail': self.may_fail4, - 'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6), - 'ipv6.dns': self.dns6, - 'ipv6.dns-search': self.dns6_search, - 'ipv6.ignore-auto-dns': self.dns6_ignore_auto, - 'ipv6.gateway': self.gw6, - 'ipv6.ignore-auto-routes': self.gw6_ignore_auto, - 'ipv6.routes': self.routes6, - 'ipv6.route-metric': self.route_metric6, - 'ipv6.method': self.ipv6_method, - 'ipv6.ip6-privacy': self.ip_privacy6, - 'ipv6.addr-gen-mode': self.addr_gen_mode6 - }) - - # Layer 2 options. - if self.mac: - options.update({self.mac_setting: self.mac}) - - if self.mtu_conn_type: - options.update({self.mtu_setting: self.mtu}) - - # Connections that can have a master. - if self.slave_conn_type: - options.update({ - 'connection.master': self.master, - }) - - # Options specific to a connection type. 
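The method fallback wired up in __init__ above is the subtlest part of this setup; reduced to a standalone sketch (function and argument names here are illustrative, not part of the module), the rule is:

def pick_method(explicit_method, conn_type, addresses):
    # An explicitly passed method4/method6 always wins.
    if explicit_method:
        return explicit_method
    # dummy and wireguard profiles get no IP configuration by default.
    if conn_type in ('dummy', 'wireguard') and not addresses:
        return 'disabled'
    # Static addresses imply manual configuration.
    if addresses:
        return 'manual'
    # Otherwise leave the setting unset and let nmcli apply its default.
    return None

The type-specific options that follow are then layered on top of the base dict built above.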
- if self.type == 'bond': - options.update({ - 'arp-interval': self.arp_interval, - 'arp-ip-target': self.arp_ip_target, - 'downdelay': self.downdelay, - 'miimon': self.miimon, - 'mode': self.mode, - 'primary': self.primary, - 'updelay': self.updelay, - }) - elif self.type == 'bond-slave': - options.update({ - 'connection.slave-type': 'bond', - }) - elif self.type == 'bridge': - options.update({ - 'bridge.ageing-time': self.ageingtime, - 'bridge.forward-delay': self.forwarddelay, - 'bridge.hello-time': self.hellotime, - 'bridge.max-age': self.maxage, - 'bridge.priority': self.priority, - 'bridge.stp': self.stp, - }) - elif self.type == 'team': - options.update({ - 'team.runner': self.runner, - 'team.runner-hwaddr-policy': self.runner_hwaddr_policy, - }) - elif self.type == 'bridge-slave': - options.update({ - 'connection.slave-type': 'bridge', - 'bridge-port.path-cost': self.path_cost, - 'bridge-port.hairpin-mode': self.hairpin, - 'bridge-port.priority': self.slavepriority, - }) - elif self.type == 'team-slave': - options.update({ - 'connection.slave-type': 'team', - }) - elif self.tunnel_conn_type: - options.update({ - 'ip-tunnel.local': self.ip_tunnel_local, - 'ip-tunnel.mode': self.type, - 'ip-tunnel.parent': self.ip_tunnel_dev, - 'ip-tunnel.remote': self.ip_tunnel_remote, - }) - if self.type == 'gre': - options.update({ - 'ip-tunnel.input-key': self.ip_tunnel_input_key, - 'ip-tunnel.output-key': self.ip_tunnel_output_key - }) - elif self.type == 'vlan': - options.update({ - 'vlan.id': self.vlanid, - 'vlan.parent': self.vlandev, - 'vlan.flags': self.flags, - 'vlan.ingress': self.ingress, - 'vlan.egress': self.egress, - }) - elif self.type == 'vxlan': - options.update({ - 'vxlan.id': self.vxlan_id, - 'vxlan.local': self.vxlan_local, - 'vxlan.remote': self.vxlan_remote, - }) - elif self.type == 'wifi': - options.update({ - '802-11-wireless.ssid': self.ssid, - 'connection.slave-type': 'bond' if self.master else None, - }) - if self.wifi: - for name, value in self.wifi.items(): - options.update({ - '802-11-wireless.%s' % name: value - }) - if self.wifi_sec: - for name, value in self.wifi_sec.items(): - options.update({ - '802-11-wireless-security.%s' % name: value - }) - elif self.type == 'gsm': - if self.gsm: - for name, value in self.gsm.items(): - options.update({ - 'gsm.%s' % name: value, - }) - elif self.type == 'wireguard': - if self.wireguard: - for name, value in self.wireguard.items(): - options.update({ - 'wireguard.%s' % name: value, - }) - # Convert settings values based on the situation. - for setting, value in options.items(): - setting_type = self.settings_type(setting) - convert_func = None - if setting_type is bool: - # Convert all bool options to yes/no. - convert_func = self.bool_to_string - if detect_change: - if setting in ('vlan.id', 'vxlan.id'): - # Convert VLAN/VXLAN IDs to text when detecting changes. - convert_func = to_text - elif setting == self.mtu_setting: - # MTU is 'auto' by default when detecting changes. - convert_func = self.mtu_to_string - elif setting == 'ipv6.ip6-privacy': - convert_func = self.ip6_privacy_to_num - elif setting_type is list: - # Convert lists to strings for nmcli create/modify commands. 
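# (Illustrative values, not from the module: under these conversions an options
#  dict of {'connection.autoconnect': True, 'ipv4.dns': ['8.8.8.8', '8.8.4.4'],
#  '802-3-ethernet.mtu': None} would reach nmcli as 'yes', '8.8.8.8,8.8.4.4'
#  and, when detecting changes, 'auto' for the unset MTU.)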
- convert_func = self.list_to_string - - if callable(convert_func): - options[setting] = convert_func(options[setting]) - - return options - - @property - def ip_conn_type(self): - return self.type in ( - 'bond', - 'bridge', - 'dummy', - 'ethernet', - '802-3-ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 'vlan', - 'wifi', - '802-11-wireless', - 'gsm', - 'wireguard', - ) - - @property - def mac_setting(self): - if self.type == 'bridge': - return 'bridge.mac-address' - else: - return '802-3-ethernet.cloned-mac-address' - - @property - def mtu_conn_type(self): - return self.type in ( - 'dummy', - 'ethernet', - 'team-slave', - ) - - @property - def mtu_setting(self): - return '802-3-ethernet.mtu' - - @staticmethod - def mtu_to_string(mtu): - if not mtu: - return 'auto' - else: - return to_text(mtu) - - @staticmethod - def ip6_privacy_to_num(privacy): - ip6_privacy_values = { - 'disabled': '0', - 'prefer-public-addr': '1 (enabled, prefer public IP)', - 'prefer-temp-addr': '2 (enabled, prefer temporary IP)', - 'unknown': '-1', - } - - if privacy is None: - return None - - if privacy not in ip6_privacy_values: - raise AssertionError('{privacy} is invalid ip_privacy6 option'.format(privacy=privacy)) - - return ip6_privacy_values[privacy] - - @property - def slave_conn_type(self): - return self.type in ( - 'bond-slave', - 'bridge-slave', - 'team-slave', - 'wifi', - ) - - @property - def tunnel_conn_type(self): - return self.type in ( - 'gre', - 'ipip', - 'sit', - ) - - @staticmethod - def enforce_ipv4_cidr_notation(ip4_addresses): - if ip4_addresses is None: - return None - return [address if '/' in address else address + '/32' for address in ip4_addresses] - - @staticmethod - def enforce_ipv6_cidr_notation(ip6_addresses): - if ip6_addresses is None: - return None - return [address if '/' in address else address + '/128' for address in ip6_addresses] - - @staticmethod - def bool_to_string(boolean): - if boolean: - return "yes" - else: - return "no" - - @staticmethod - def list_to_string(lst): - return ",".join(lst or [""]) - - @staticmethod - def settings_type(setting): - if setting in ('bridge.stp', - 'bridge-port.hairpin-mode', - 'connection.autoconnect', - 'ipv4.never-default', - 'ipv4.ignore-auto-dns', - 'ipv4.ignore-auto-routes', - 'ipv4.may-fail', - 'ipv6.ignore-auto-dns', - 'ipv6.ignore-auto-routes', - '802-11-wireless.hidden'): - return bool - elif setting in ('ipv4.addresses', - 'ipv6.addresses', - 'ipv4.dns', - 'ipv4.dns-search', - 'ipv4.routes', - 'ipv4.routing-rules', - 'ipv6.dns', - 'ipv6.dns-search', - 'ipv6.routes', - '802-11-wireless-security.group', - '802-11-wireless-security.leap-password-flags', - '802-11-wireless-security.pairwise', - '802-11-wireless-security.proto', - '802-11-wireless-security.psk-flags', - '802-11-wireless-security.wep-key-flags', - '802-11-wireless.mac-address-blacklist'): - return list - return str - - def list_connection_info(self): - cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show'] - (rc, out, err) = self.execute_command(cmd) - if rc != 0: - raise NmcliModuleError(err) - return out.splitlines() - - def connection_exists(self): - return self.conn_name in self.list_connection_info() - - def down_connection(self): - cmd = [self.nmcli_bin, 'con', 'down', self.conn_name] - return self.execute_command(cmd) - - def up_connection(self): - cmd = [self.nmcli_bin, 'con', 'up', self.conn_name] - return self.execute_command(cmd) - - def connection_update(self, nmcli_command): - if nmcli_command == 'create': - cmd = 
[self.nmcli_bin, 'con', 'add', 'type'] - if self.tunnel_conn_type: - cmd.append('ip-tunnel') - else: - cmd.append(self.type) - cmd.append('con-name') - elif nmcli_command == 'modify': - cmd = [self.nmcli_bin, 'con', 'modify'] - else: - self.module.fail_json(msg="Invalid nmcli command.") - cmd.append(self.conn_name) - - # Use connection name as default for interface name on creation. - if nmcli_command == 'create' and self.ifname is None: - ifname = self.conn_name - else: - ifname = self.ifname - - options = { - 'connection.interface-name': ifname, - } - - options.update(self.connection_options()) - - # Constructing the command. - for key, value in options.items(): - if value is not None: - if key in self.SECRET_OPTIONS: - self.edit_commands += ['set %s %s' % (key, value)] - continue - cmd.extend([key, value]) - - return self.execute_command(cmd) - - def create_connection(self): - status = self.connection_update('create') - if status[0] == 0 and self.edit_commands: - status = self.edit_connection() - if self.create_connection_up: - status = self.up_connection() - return status - - @property - def create_connection_up(self): - if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): - if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): - return True - elif self.type == 'team': - if (self.dns4 is not None) or (self.dns6 is not None): - return True - return False - - def remove_connection(self): - # self.down_connection() - cmd = [self.nmcli_bin, 'con', 'del', self.conn_name] - return self.execute_command(cmd) - - def modify_connection(self): - status = self.connection_update('modify') - if status[0] == 0 and self.edit_commands: - status = self.edit_connection() - return status - - def edit_connection(self): - commands = self.edit_commands + ['save', 'quit'] - return self.execute_edit_commands(commands, arguments=[self.conn_name]) - - def show_connection(self): - cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] - - (rc, out, err) = self.execute_command(cmd) - - if rc != 0: - raise NmcliModuleError(err) - - p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$') - - conn_info = dict() - for line in out.splitlines(): - pair = line.split(':', 1) - key = pair[0].strip() - key_type = self.settings_type(key) - if key and len(pair) > 1: - raw_value = pair[1].lstrip() - if raw_value == '--': - conn_info[key] = None - elif key == 'bond.options': - # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax. 
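# (Hypothetical illustration: a raw_value of 'miimon=100,downdelay=0' ends up
#  as conn_info['miimon'] = '100' and conn_info['downdelay'] = '0', so these
#  flat alias keys can be compared directly against the module parameters.)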
- opts = raw_value.split(',') - for opt in opts: - alias_pair = opt.split('=', 1) - if len(alias_pair) > 1: - alias_key = alias_pair[0] - alias_value = alias_pair[1] - conn_info[alias_key] = alias_value - elif key in ('ipv4.routes', 'ipv6.routes'): - conn_info[key] = [s.strip() for s in raw_value.split(';')] - elif key_type == list: - conn_info[key] = [s.strip() for s in raw_value.split(',')] - else: - m_enum = p_enum_value.match(raw_value) - if m_enum is not None: - value = m_enum.group(1) - else: - value = raw_value - conn_info[key] = value - - return conn_info - - def get_supported_properties(self, setting): - properties = [] - - if setting == '802-11-wireless-security': - set_property = 'psk' - set_value = 'FAKEVALUE' - commands = ['set %s.%s %s' % (setting, set_property, set_value)] - else: - commands = [] - - commands += ['print %s' % setting, 'quit', 'yes'] - - (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) - - if rc != 0: - raise NmcliModuleError(err) - - for line in out.splitlines(): - prefix = '%s.' % setting - if (line.startswith(prefix)): - pair = line.split(':', 1) - property = pair[0].strip().replace(prefix, '') - properties.append(property) - - return properties - - def check_for_unsupported_properties(self, setting): - if setting == '802-11-wireless': - setting_key = 'wifi' - elif setting == '802-11-wireless-security': - setting_key = 'wifi_sec' - else: - setting_key = setting - - supported_properties = self.get_supported_properties(setting) - unsupported_properties = [] - - for property, value in getattr(self, setting_key).items(): - if property not in supported_properties: - unsupported_properties.append(property) - - if unsupported_properties: - msg_options = [] - for property in unsupported_properties: - msg_options.append('%s.%s' % (setting_key, property)) - - msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options) - if self.ignore_unsupported_suboptions: - self.module.warn(msg) - else: - self.module.fail_json(msg=msg) - - return unsupported_properties - - def _compare_conn_params(self, conn_info, options): - changed = False - diff_before = dict() - diff_after = dict() - - for key, value in options.items(): - if not value: - continue - - if key in conn_info: - current_value = conn_info[key] - if key in ('ipv4.routes', 'ipv6.routes') and current_value is not None: - # ipv4.routes and ipv6.routes do not have same options and show_connection() format - # options: ['10.11.0.0/24 10.10.0.2', '10.12.0.0/24 10.10.0.2 200'] - # show_connection(): ['{ ip = 10.11.0.0/24, nh = 10.10.0.2 }', '{ ip = 10.12.0.0/24, nh = 10.10.0.2, mt = 200 }'] - # Need to convert in order to compare both - current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+),\s*mt\s*=\s*([^} ]+)\s*}', r'\1 \2 \3', - route) for route in current_value] - current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+)\s*}', r'\1 \2', route) for route in current_value] - if key == self.mac_setting: - # MAC addresses are case insensitive, nmcli always reports them in uppercase - value = value.upper() - # ensure current_value is also converted to uppercase in case nmcli changes behaviour - current_value = current_value.upper() - if key == 'gsm.apn': - # Depending on version nmcli adds double-qoutes to gsm.apn - # Need to strip them in order to compare both - current_value = current_value.strip('"') - if key == self.mtu_setting and self.mtu is None: - self.mtu = 0 - else: - # parameter does not exist - current_value = None - - if 
isinstance(current_value, list) and isinstance(value, list): - # compare values between two lists - if sorted(current_value) != sorted(value): - changed = True - elif all([key == self.mtu_setting, self.type == 'dummy', current_value is None, value == 'auto', self.mtu is None]): - value = None - else: - if current_value != to_text(value): - changed = True - - diff_before[key] = current_value - diff_after[key] = value - - diff = { - 'before': diff_before, - 'after': diff_after, - } - return (changed, diff) - - def is_connection_changed(self): - options = { - 'connection.interface-name': self.ifname, - } - - if not self.type: - current_con_type = self.show_connection().get('connection.type') - if current_con_type: - self.type = current_con_type - - options.update(self.connection_options(detect_change=True)) - return self._compare_conn_params(self.show_connection(), options) - - -def main(): - # Parsing argument file - module = AnsibleModule( - argument_spec=dict( - ignore_unsupported_suboptions=dict(type='bool', default=False), - autoconnect=dict(type='bool', default=True), - state=dict(type='str', required=True, choices=['absent', 'present']), - conn_name=dict(type='str', required=True), - master=dict(type='str'), - ifname=dict(type='str'), - type=dict(type='str', - choices=[ - 'bond', - 'bond-slave', - 'bridge', - 'bridge-slave', - 'dummy', - 'ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 'team-slave', - 'vlan', - 'vxlan', - 'wifi', - 'gsm', - 'wireguard', - ]), - ip4=dict(type='list', elements='str'), - gw4=dict(type='str'), - gw4_ignore_auto=dict(type='bool', default=False), - routes4=dict(type='list', elements='str'), - route_metric4=dict(type='int'), - routing_rules4=dict(type='list', elements='str'), - never_default4=dict(type='bool', default=False), - dns4=dict(type='list', elements='str'), - dns4_search=dict(type='list', elements='str'), - dns4_ignore_auto=dict(type='bool', default=False), - method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), - may_fail4=dict(type='bool', default=True), - dhcp_client_id=dict(type='str'), - ip6=dict(type='list', elements='str'), - gw6=dict(type='str'), - gw6_ignore_auto=dict(type='bool', default=False), - dns6=dict(type='list', elements='str'), - dns6_search=dict(type='list', elements='str'), - dns6_ignore_auto=dict(type='bool', default=False), - routes6=dict(type='list', elements='str'), - route_metric6=dict(type='int'), - method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), - ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']), - addr_gen_mode6=dict(type='str', choices=['eui64', 'stable-privacy']), - # Bond Specific vars - mode=dict(type='str', default='balance-rr', - choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), - miimon=dict(type='int'), - downdelay=dict(type='int'), - updelay=dict(type='int'), - arp_interval=dict(type='int'), - arp_ip_target=dict(type='str'), - primary=dict(type='str'), - # general usage - mtu=dict(type='int'), - mac=dict(type='str'), - zone=dict(type='str'), - # bridge specific vars - stp=dict(type='bool', default=True), - priority=dict(type='int', default=128), - slavepriority=dict(type='int', default=32), - forwarddelay=dict(type='int', default=15), - hellotime=dict(type='int', default=2), - maxage=dict(type='int', default=20), - ageingtime=dict(type='int', default=300), - 
hairpin=dict(type='bool', default=True), - path_cost=dict(type='int', default=100), - # team specific vars - runner=dict(type='str', default='roundrobin', - choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), - # team active-backup runner specific options - runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), - # vlan specific vars - vlanid=dict(type='int'), - vlandev=dict(type='str'), - flags=dict(type='str'), - ingress=dict(type='str'), - egress=dict(type='str'), - # vxlan specific vars - vxlan_id=dict(type='int'), - vxlan_local=dict(type='str'), - vxlan_remote=dict(type='str'), - # ip-tunnel specific vars - ip_tunnel_dev=dict(type='str'), - ip_tunnel_local=dict(type='str'), - ip_tunnel_remote=dict(type='str'), - # ip-tunnel type gre specific vars - ip_tunnel_input_key=dict(type='str', no_log=True), - ip_tunnel_output_key=dict(type='str', no_log=True), - # 802-11-wireless* specific vars - ssid=dict(type='str'), - wifi=dict(type='dict'), - wifi_sec=dict(type='dict', no_log=True), - gsm=dict(type='dict'), - wireguard=dict(type='dict'), - ), - mutually_exclusive=[['never_default4', 'gw4']], - required_if=[("type", "wifi", [("ssid")])], - supports_check_mode=True, - ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - nmcli = Nmcli(module) - - (rc, out, err) = (None, '', '') - result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} - - # check for issues - if nmcli.conn_name is None: - nmcli.module.fail_json(msg="Please specify a name for the connection") - # team checks - if nmcli.type == "team": - if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup": - nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") - # team-slave checks - if nmcli.type == 'team-slave': - if nmcli.master is None: - nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type) - if nmcli.ifname is None: - nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type) - if nmcli.type == 'wifi': - unsupported_properties = {} - if nmcli.wifi: - if 'ssid' in nmcli.wifi: - module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") - del nmcli.wifi['ssid'] - unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') - if nmcli.wifi_sec: - unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') - if nmcli.ignore_unsupported_suboptions and unsupported_properties: - for setting_key, properties in unsupported_properties.items(): - for property in properties: - del getattr(nmcli, setting_key)[property] - - try: - if nmcli.state == 'absent': - if nmcli.connection_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = nmcli.down_connection() - (rc, out, err) = nmcli.remove_connection() - if rc != 0: - module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) - - elif nmcli.state == 'present': - if nmcli.connection_exists(): - changed, diff = nmcli.is_connection_changed() - if module._diff: - result['diff'] = diff - - if changed: - # modify connection (note: this function is check mode aware) - # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type)) - result['Exists'] = 'Connections do exist so we are modifying them' - if module.check_mode: - module.exit_json(changed=True, 
**result) - (rc, out, err) = nmcli.modify_connection() - else: - result['Exists'] = 'Connections already exist and no changes made' - if module.check_mode: - module.exit_json(changed=False, **result) - if not nmcli.connection_exists(): - result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type)) - if module.check_mode: - module.exit_json(changed=True, **result) - (rc, out, err) = nmcli.create_connection() - if rc is not None and rc != 0: - module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) - except NmcliModuleError as e: - module.fail_json(name=nmcli.conn_name, msg=str(e)) - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py b/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py deleted file mode 100644 index fc0d5e1c..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py +++ /dev/null @@ -1,483 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Marcin Skarbek -# (c) 2016, Andreas Olsson -# (c) 2017, Loic Blot -# -# This module was ported from https://github.com/mskarbek/ansible-nsupdate -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: nsupdate - -short_description: Manage DNS records. -description: - - Create, update and remove DNS records using DDNS updates -requirements: - - dnspython -author: "Loic Blot (@nerzhul)" -options: - state: - description: - - Manage DNS record. - choices: ['present', 'absent'] - default: 'present' - type: str - server: - description: - - Apply DNS modification on this server, specified by IPv4 or IPv6 address. - required: true - type: str - port: - description: - - Use this TCP port when connecting to C(server). - default: 53 - type: int - key_name: - description: - - Use TSIG key name to authenticate against DNS C(server) - type: str - key_secret: - description: - - Use TSIG key secret, associated with C(key_name), to authenticate against C(server) - type: str - key_algorithm: - description: - - Specify key algorithm used by C(key_secret). - choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', - 'hmac-sha512'] - default: 'hmac-md5' - type: str - zone: - description: - - DNS record will be modified on this C(zone). - - When omitted DNS will be queried to attempt finding the correct zone. - - Starting with Ansible 2.7 this parameter is optional. - type: str - record: - description: - - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot). - required: true - type: str - type: - description: - - Sets the record type. - default: 'A' - type: str - ttl: - description: - - Sets the record TTL. - default: 3600 - type: int - value: - description: - - Sets the record value. - type: list - elements: str - protocol: - description: - - Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option. 
- default: 'tcp' - choices: ['tcp', 'udp'] - type: str -''' - -EXAMPLES = ''' -- name: Add or modify ansible.example.org A to 192.168.1.1 - community.general.nsupdate: - key_name: "nsupdate" - key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" - server: "10.1.1.1" - zone: "example.org" - record: "ansible" - value: "192.168.1.1" - -- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3 - community.general.nsupdate: - key_name: "nsupdate" - key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" - server: "10.1.1.1" - zone: "example.org" - record: "ansible" - value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"] - -- name: Remove puppet.example.org CNAME - community.general.nsupdate: - key_name: "nsupdate" - key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" - server: "10.1.1.1" - zone: "example.org" - record: "puppet" - type: "CNAME" - state: absent - -- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org - community.general.nsupdate: - key_name: "nsupdate" - key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" - server: "10.1.1.1" - record: "1.1.168.192.in-addr.arpa." - type: "PTR" - value: "ansible.example.org." - state: present - -- name: Remove 1.1.168.192.in-addr.arpa. PTR - community.general.nsupdate: - key_name: "nsupdate" - key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" - server: "10.1.1.1" - record: "1.1.168.192.in-addr.arpa." - type: "PTR" - state: absent -''' - -RETURN = ''' -changed: - description: Whether the module has modified the record - returned: success - type: bool -record: - description: DNS record - returned: success - type: str - sample: 'ansible' -ttl: - description: DNS record TTL - returned: success - type: int - sample: 86400 -type: - description: DNS record type - returned: success - type: str - sample: 'CNAME' -value: - description: DNS record value(s) - returned: success - type: list - sample: '192.168.1.1' -zone: - description: DNS record zone - returned: success - type: str - sample: 'example.org.' -dns_rc: - description: dnspython return code - returned: always - type: int - sample: 4 -dns_rc_str: - description: dnspython return code (string representation) - returned: always - type: str - sample: 'REFUSED' -''' - -import traceback - -from binascii import Error as binascii_error -from socket import error as socket_error - -DNSPYTHON_IMP_ERR = None -try: - import dns.update - import dns.query - import dns.tsigkeyring - import dns.message - import dns.resolver - - HAVE_DNSPYTHON = True -except ImportError: - DNSPYTHON_IMP_ERR = traceback.format_exc() - HAVE_DNSPYTHON = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -class RecordManager(object): - def __init__(self, module): - self.module = module - - if module.params['key_name']: - try: - self.keyring = dns.tsigkeyring.from_text({ - module.params['key_name']: module.params['key_secret'] - }) - except TypeError: - module.fail_json(msg='Missing key_secret') - except binascii_error as e: - module.fail_json(msg='TSIG key error: %s' % to_native(e)) - else: - self.keyring = None - - if module.params['key_algorithm'] == 'hmac-md5': - self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT' - else: - self.algorithm = module.params['key_algorithm'] - - if module.params['zone'] is None: - if module.params['record'][-1] != '.': - self.module.fail_json(msg='record must be absolute when omitting zone parameter') - self.zone = self.lookup_zone() - else: - self.zone = module.params['zone'] - - if self.zone[-1] != '.': - self.zone += '.'
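For orientation while reading RecordManager: the module drives dnspython's dynamic-update API end to end. Below is a minimal standalone sketch of its two core mechanisms, the TSIG-signed update and the SOA-walk zone discovery performed by lookup_zone. It assumes a TSIG-enabled DNS server; the server address, key name and key secret are placeholder values taken from the module's own examples, not live credentials.

# Sketch: what nsupdate does under the hood with dnspython.
# 1) TSIG-signed dynamic update, roughly mirroring create_record()/modify_record().
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdatatype
import dns.tsigkeyring
import dns.update

keyring = dns.tsigkeyring.from_text({'nsupdate': '+bFQtBCta7j2vWkjPkAFtgA=='})
update = dns.update.Update('example.org.', keyring=keyring,
                           keyalgorithm='HMAC-MD5.SIG-ALG.REG.INT')
update.replace('ansible', 3600, 'A', '192.168.1.1')   # name, ttl, type, value
response = dns.query.tcp(update, '10.1.1.1', timeout=10, port=53)
print(dns.rcode.to_text(response.rcode()))            # 'NOERROR' on success

# 2) Zone discovery, mirroring lookup_zone(): query SOA for the record name
#    and climb to the parent name until the authority section names the zone cut.
def find_zone(record, server):
    name = dns.name.from_text(record)                 # record must be absolute
    while True:
        query = dns.message.make_query(name, dns.rdatatype.SOA)
        reply = dns.query.udp(query, server, timeout=10)
        if reply.authority and reply.authority[0].name == name:
            return name.to_text()
        name = name.parent()                          # raises dns.name.NoParent at the root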
- - if module.params['record'][-1] != '.': - self.fqdn = module.params['record'] + '.' + self.zone - else: - self.fqdn = module.params['record'] - - if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None: - self.value = list(map(self.txt_helper, self.module.params['value'])) - else: - self.value = self.module.params['value'] - - self.dns_rc = 0 - - def txt_helper(self, entry): - if entry[0] == '"' and entry[-1] == '"': - return entry - return '"{text}"'.format(text=entry) - - def lookup_zone(self): - name = dns.name.from_text(self.module.params['record']) - while True: - query = dns.message.make_query(name, dns.rdatatype.SOA) - if self.keyring: - query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) - try: - if self.module.params['protocol'] == 'tcp': - lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) - else: - lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) - except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) - except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) - if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]: - self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % ( - self.module.params['server'], self.module.params['record'])) - try: - zone = lookup.authority[0].name - if zone == name: - return zone.to_text() - except IndexError: - pass - try: - name = name.parent() - except dns.name.NoParent: - self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' 
% (self.module.params['record'])) - - def __do_update(self, update): - response = None - try: - if self.module.params['protocol'] == 'tcp': - response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) - else: - response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) - except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) - except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) - return response - - def create_or_update_record(self): - result = {'changed': False, 'failed': False} - - exists = self.record_exists() - if exists in [0, 2]: - if self.module.check_mode: - self.module.exit_json(changed=True) - - if exists == 0: - self.dns_rc = self.create_record() - if self.dns_rc != 0: - result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc - - elif exists == 2: - self.dns_rc = self.modify_record() - if self.dns_rc != 0: - result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc - - if self.dns_rc != 0: - result['failed'] = True - else: - result['changed'] = True - - else: - result['changed'] = False - - return result - - def create_record(self): - update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - for entry in self.value: - try: - update.add(self.module.params['record'], - self.module.params['ttl'], - self.module.params['type'], - entry) - except AttributeError: - self.module.fail_json(msg='value needed when state=present') - except dns.exception.SyntaxError: - self.module.fail_json(msg='Invalid/malformed value') - - response = self.__do_update(update) - return dns.message.Message.rcode(response) - - def modify_record(self): - update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - update.delete(self.module.params['record'], self.module.params['type']) - for entry in self.value: - try: - update.add(self.module.params['record'], - self.module.params['ttl'], - self.module.params['type'], - entry) - except AttributeError: - self.module.fail_json(msg='value needed when state=present') - except dns.exception.SyntaxError: - self.module.fail_json(msg='Invalid/malformed value') - response = self.__do_update(update) - - return dns.message.Message.rcode(response) - - def remove_record(self): - result = {'changed': False, 'failed': False} - - if self.record_exists() == 0: - return result - - # Check mode and record exists, declared fake change. 
- if self.module.check_mode: - self.module.exit_json(changed=True) - - update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - update.delete(self.module.params['record'], self.module.params['type']) - - response = self.__do_update(update) - self.dns_rc = dns.message.Message.rcode(response) - - if self.dns_rc != 0: - result['failed'] = True - result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc - else: - result['changed'] = True - - return result - - def record_exists(self): - update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - try: - update.present(self.module.params['record'], self.module.params['type']) - except dns.rdatatype.UnknownRdatatype as e: - self.module.fail_json(msg='Record error: {0}'.format(to_native(e))) - - response = self.__do_update(update) - self.dns_rc = dns.message.Message.rcode(response) - if self.dns_rc == 0: - if self.module.params['state'] == 'absent': - return 1 - for entry in self.value: - try: - update.present(self.module.params['record'], self.module.params['type'], entry) - except AttributeError: - self.module.fail_json(msg='value needed when state=present') - except dns.exception.SyntaxError: - self.module.fail_json(msg='Invalid/malformed value') - response = self.__do_update(update) - self.dns_rc = dns.message.Message.rcode(response) - if self.dns_rc == 0: - if self.ttl_changed(): - return 2 - else: - return 1 - else: - return 2 - else: - return 0 - - def ttl_changed(self): - query = dns.message.make_query(self.fqdn, self.module.params['type']) - if self.keyring: - query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) - - try: - if self.module.params['protocol'] == 'tcp': - lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) - else: - lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) - except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) - except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) - - if lookup.rcode() != dns.rcode.NOERROR: - self.module.fail_json(msg='Failed to lookup TTL of existing matching record.') - - current_ttl = lookup.answer[0].ttl - return current_ttl != self.module.params['ttl'] - - -def main(): - tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', - 'hmac-sha256', 'hmac-sha384', 'hmac-sha512'] - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), - server=dict(required=True, type='str'), - port=dict(required=False, default=53, type='int'), - key_name=dict(required=False, type='str'), - key_secret=dict(required=False, type='str', no_log=True), - key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'), - zone=dict(required=False, default=None, type='str'), - record=dict(required=True, type='str'), - type=dict(required=False, default='A', type='str'), - ttl=dict(required=False, default=3600, type='int'), - value=dict(required=False, default=None, type='list', elements='str'), - protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str') - ), - supports_check_mode=True - ) - - if not HAVE_DNSPYTHON: - module.fail_json(msg=missing_required_lib('dnspython'), 
exception=DNSPYTHON_IMP_ERR) - - if len(module.params["record"]) == 0: - module.fail_json(msg='record cannot be empty.') - - record = RecordManager(module) - result = {} - if module.params["state"] == 'absent': - result = record.remove_record() - elif module.params["state"] == 'present': - result = record.create_or_update_record() - - result['dns_rc'] = record.dns_rc - result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc) - if result['failed']: - module.fail_json(**result) - else: - result['record'] = dict(zone=record.zone, - record=module.params['record'], - type=module.params['type'], - ttl=module.params['ttl'], - value=record.value) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py b/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py deleted file mode 100644 index 4d65fcb9..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# copyright: (c) 2016, Loic Blot -# Sponsored by Infopro Digital. http://www.infopro-digital.com/ -# Sponsored by E.T.A.I. http://www.etai.fr/ -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: omapi_host -short_description: Set up OMAPI hosts. -description: Manage OMAPI hosts in compatible DHCPd servers -requirements: - - pypureomapi -author: -- Loic Blot (@nerzhul) -options: - state: - description: - - Create or remove OMAPI host. - type: str - required: true - choices: [ absent, present ] - hostname: - description: - - Sets the host lease hostname (mandatory if state=present). - type: str - aliases: [ name ] - host: - description: - - Sets OMAPI server host to interact with. - type: str - default: localhost - port: - description: - - Sets the OMAPI server port to interact with. - type: int - default: 7911 - key_name: - description: - - Sets the TSIG key name for authenticating against OMAPI server. - type: str - required: true - key: - description: - - Sets the TSIG key content for authenticating against OMAPI server. - type: str - required: true - macaddr: - description: - - Sets the lease host MAC address. - type: str - required: true - ip: - description: - - Sets the lease host IP address. - type: str - statements: - description: - - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon). - type: list - elements: str - default: [] - ddns: - description: - - Enable dynamic DNS updates for this host. - type: bool - default: no - -''' -EXAMPLES = r''' -- name: Add a host using OMAPI - community.general.omapi_host: - key_name: defomapi - key: +bFQtBCta6j2vWkjPkNFtgA== - host: 10.98.4.55 - macaddr: 44:dd:ab:dd:11:44 - name: server01 - ip: 192.168.88.99 - ddns: yes - statements: - - filename "pxelinux.0" - - next-server 1.1.1.1 - state: present - -- name: Remove a host using OMAPI - community.general.omapi_host: - key_name: defomapi - key: +bFQtBCta6j2vWkjPkNFtgA== - host: 10.1.1.1 - macaddr: 00:66:ab:dd:11:44 - state: absent -''' - -RETURN = r''' -lease: - description: dictionary containing host information - returned: success - type: complex - contains: - ip-address: - description: IP address, if one is assigned.
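For the omapi_host module whose source follows, pypureomapi exposes the same host-management primitives directly. A minimal sketch, assuming a dhcpd with OMAPI enabled on port 7911; the server address, key name and base64 key are placeholders lifted from the module's own examples.

# Sketch: the pypureomapi calls that omapi_host wraps. All values are placeholders.
from pypureomapi import Omapi, OmapiErrorNotFound

omapi = Omapi('10.98.4.55', 7911, b'defomapi', '+bFQtBCta6j2vWkjPkNFtgA==')
omapi.add_host('192.168.88.99', '44:dd:ab:dd:11:44')  # state=present: ip, mac

try:
    omapi.del_host('44:dd:ab:dd:11:44')               # state=absent
except OmapiErrorNotFound:
    pass  # host already gone; the module reports no change in this case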
- returned: success - type: str - sample: '192.168.1.5' - hardware-address: - description: MAC address - returned: success - type: str - sample: '00:11:22:33:44:55' - hardware-type: - description: hardware type, generally '1' - returned: success - type: int - sample: 1 - name: - description: hostname - returned: success - type: str - sample: 'mydesktop' -''' - -import binascii -import socket -import struct -import traceback - -PUREOMAPI_IMP_ERR = None -try: - from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound - from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac - from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE - pureomapi_found = True -except ImportError: - PUREOMAPI_IMP_ERR = traceback.format_exc() - pureomapi_found = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_bytes, to_native - - -class OmapiHostManager: - def __init__(self, module): - self.module = module - self.omapi = None - self.connect() - - def connect(self): - try: - self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']), - self.module.params['key']) - except binascii.Error: - self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.") - except OmapiError as e: - self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' " - "are valid. Exception was: %s" % to_native(e)) - except socket.error as e: - self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e)) - - def get_host(self, macaddr): - msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict')) - msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr))) - msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1))) - response = self.omapi.query_server(msg) - if response.opcode != OMAPI_OP_UPDATE: - return None - return response - - @staticmethod - def unpack_facts(obj): - result = dict(obj) - if 'hardware-address' in result: - result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')])) - - if 'ip-address' in result: - result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')])) - - if 'hardware-type' in result: - result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')]) - - return result - - def setup_host(self): - if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0: - self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.") - - msg = None - host_response = self.get_host(self.module.params['macaddr']) - # If host was not found using macaddr, add create message - if host_response is None: - msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict')) - msg.message.append((to_bytes('create'), struct.pack('!I', 1))) - msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1))) - msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr']))) - msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1))) - msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname']))) - if self.module.params['ip'] is not None: - msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip']))) - - stmt_join = "" - if self.module.params['ddns']: - stmt_join += 
'ddns-hostname "{0}"; '.format(self.module.params['hostname']) - - try: - if len(self.module.params['statements']) > 0: - stmt_join += "; ".join(self.module.params['statements']) - stmt_join += "; " - except TypeError as e: - self.module.fail_json(msg="Invalid statements found: %s" % to_native(e)) - - if len(stmt_join) > 0: - msg.obj.append((to_bytes('statements'), to_bytes(stmt_join))) - - try: - response = self.omapi.query_server(msg) - if response.opcode != OMAPI_OP_UPDATE: - self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters " - "are valid.") - self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj)) - except OmapiError as e: - self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) - # Forge update message - else: - response_obj = self.unpack_facts(host_response.obj) - fields_to_update = {} - - if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \ - unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']: - fields_to_update['ip-address'] = pack_ip(self.module.params['ip']) - - # Name cannot be changed - if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']: - self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. " - "Please delete host and add new." % - (response_obj['name'], self.module.params['hostname'])) - - """ - # It seems statements are not returned by OMAPI, then we cannot modify them at this moment. - if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \ - response_obj['statements'] != self.module.params['statements']: - with open('/tmp/omapi', 'w') as fb: - for (k,v) in iteritems(response_obj): - fb.writelines('statements: %s %s\n' % (k, v)) - """ - if len(fields_to_update) == 0: - self.module.exit_json(changed=False, lease=response_obj) - else: - msg = OmapiMessage.update(host_response.handle) - msg.update_object(fields_to_update) - - try: - response = self.omapi.query_server(msg) - if response.opcode != OMAPI_OP_STATUS: - self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters " - "are valid.") - self.module.exit_json(changed=True) - except OmapiError as e: - self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) - - def remove_host(self): - try: - self.omapi.del_host(self.module.params['macaddr']) - self.module.exit_json(changed=True) - except OmapiErrorNotFound: - self.module.exit_json() - except OmapiError as e: - self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', required=True, choices=['absent', 'present']), - host=dict(type='str', default="localhost"), - port=dict(type='int', default=7911), - key_name=dict(type='str', required=True), - key=dict(type='str', required=True, no_log=True), - macaddr=dict(type='str', required=True), - hostname=dict(type='str', aliases=['name']), - ip=dict(type='str'), - ddns=dict(type='bool', default=False), - statements=dict(type='list', elements='str', default=[]), - ), - supports_check_mode=False, - ) - - if not pureomapi_found: - module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR) - - if module.params['key'] is None or len(module.params["key"]) == 0: - module.fail_json(msg="'key' parameter cannot be empty.") - - if module.params['key_name'] is None or len(module.params["key_name"]) == 0: - module.fail_json(msg="'key_name' parameter 
cannot be empty.") - - host_manager = OmapiHostManager(module) - try: - if module.params['state'] == 'present': - host_manager.setup_host() - elif module.params['state'] == 'absent': - host_manager.remove_host() - except ValueError as e: - module.fail_json(msg="OMAPI input value error: %s" % to_native(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py b/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py deleted file mode 100644 index 37183b95..00000000 --- a/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py +++ /dev/null @@ -1,472 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Networklore's snmp library for Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: snmp_facts -author: -- Patrick Ogenstad (@ogenstad) -short_description: Retrieve facts for a device using SNMP -description: - - Retrieve facts for a device using SNMP, the facts will be - inserted to the ansible_facts key. -requirements: - - pysnmp -options: - host: - description: - - Set to target SNMP server (normally C({{ inventory_hostname }})). - type: str - required: true - version: - description: - - SNMP Version to use, C(v2), C(v2c) or C(v3). - type: str - required: true - choices: [ v2, v2c, v3 ] - community: - description: - - The SNMP community string, required if I(version) is C(v2) or C(v2c). - type: str - level: - description: - - Authentication level. - - Required if I(version) is C(v3). - type: str - choices: [ authNoPriv, authPriv ] - username: - description: - - Username for SNMPv3. - - Required if I(version) is C(v3). - type: str - integrity: - description: - - Hashing algorithm. - - Required if I(version) is C(v3). - type: str - choices: [ md5, sha ] - authkey: - description: - - Authentication key. - - Required I(version) is C(v3). - type: str - privacy: - description: - - Encryption algorithm. - - Required if I(level) is C(authPriv). - type: str - choices: [ aes, des ] - privkey: - description: - - Encryption key. - - Required if I(level) is C(authPriv). - type: str - timeout: - description: - - Response timeout in seconds. - type: int - version_added: 2.3.0 - retries: - description: - - Maximum number of request retries, 0 retries means just a single request. - type: int - version_added: 2.3.0 -''' - -EXAMPLES = r''' -- name: Gather facts with SNMP version 2 - community.general.snmp_facts: - host: '{{ inventory_hostname }}' - version: v2c - community: public - delegate_to: local - -- name: Gather facts using SNMP version 3 - community.general.snmp_facts: - host: '{{ inventory_hostname }}' - version: v3 - level: authPriv - integrity: sha - privacy: aes - username: snmp-user - authkey: abc12345 - privkey: def6789 - delegate_to: localhost -''' - -RETURN = r''' -ansible_sysdescr: - description: A textual description of the entity. - returned: success - type: str - sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64 -ansible_sysobjectid: - description: The vendor's authoritative identification of the network management subsystem contained in the entity. 
- returned: success - type: str - sample: 1.3.6.1.4.1.8072.3.2.10 -ansible_sysuptime: - description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized. - returned: success - type: int - sample: 42388 -ansible_syscontact: - description: The textual identification of the contact person for this managed node, together with information on how to contact this person. - returned: success - type: str - sample: Me -ansible_sysname: - description: An administratively-assigned name for this managed node. - returned: success - type: str - sample: ubuntu-user -ansible_syslocation: - description: The physical location of this node (e.g., C(telephone closet, 3rd floor)). - returned: success - type: str - sample: Sitting on the Dock of the Bay -ansible_all_ipv4_addresses: - description: List of all IPv4 addresses. - returned: success - type: list - sample: ["127.0.0.1", "172.17.0.1"] -ansible_interfaces: - description: Dictionary of each network interface and its metadata. - returned: success - type: dict - sample: { - "1": { - "adminstatus": "up", - "description": "", - "ifindex": "1", - "ipv4": [ - { - "address": "127.0.0.1", - "netmask": "255.0.0.0" - } - ], - "mac": "", - "mtu": "65536", - "name": "lo", - "operstatus": "up", - "speed": "65536" - }, - "2": { - "adminstatus": "up", - "description": "", - "ifindex": "2", - "ipv4": [ - { - "address": "192.168.213.128", - "netmask": "255.255.255.0" - } - ], - "mac": "000a305a52a1", - "mtu": "1500", - "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", - "operstatus": "up", - "speed": "1500" - } - } -''' - -import binascii -import traceback -from collections import defaultdict - -PYSNMP_IMP_ERR = None -try: - from pysnmp.entity.rfc3413.oneliner import cmdgen - from pysnmp.proto.rfc1905 import EndOfMibView - HAS_PYSNMP = True -except Exception: - PYSNMP_IMP_ERR = traceback.format_exc() - HAS_PYSNMP = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_text - - -class DefineOid(object): - - def __init__(self, dotprefix=False): - if dotprefix: - dp = "." 
- else: - dp = "" - - # From SNMPv2-MIB - self.sysDescr = dp + "1.3.6.1.2.1.1.1.0" - self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0" - self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0" - self.sysContact = dp + "1.3.6.1.2.1.1.4.0" - self.sysName = dp + "1.3.6.1.2.1.1.5.0" - self.sysLocation = dp + "1.3.6.1.2.1.1.6.0" - - # From IF-MIB - self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1" - self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2" - self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4" - self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5" - self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6" - self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7" - self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8" - self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18" - - # From IP-MIB - self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1" - self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2" - self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3" - - -def decode_hex(hexstring): - - if len(hexstring) < 3: - return hexstring - if hexstring[:2] == "0x": - return to_text(binascii.unhexlify(hexstring[2:])) - return hexstring - - -def decode_mac(hexstring): - - if len(hexstring) != 14: - return hexstring - if hexstring[:2] == "0x": - return hexstring[2:] - return hexstring - - -def lookup_adminstatus(int_adminstatus): - adminstatus_options = { - 1: 'up', - 2: 'down', - 3: 'testing' - } - if int_adminstatus in adminstatus_options: - return adminstatus_options[int_adminstatus] - return "" - - -def lookup_operstatus(int_operstatus): - operstatus_options = { - 1: 'up', - 2: 'down', - 3: 'testing', - 4: 'unknown', - 5: 'dormant', - 6: 'notPresent', - 7: 'lowerLayerDown' - } - if int_operstatus in operstatus_options: - return operstatus_options[int_operstatus] - return "" - - -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', required=True), - version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']), - community=dict(type='str'), - username=dict(type='str'), - level=dict(type='str', choices=['authNoPriv', 'authPriv']), - integrity=dict(type='str', choices=['md5', 'sha']), - privacy=dict(type='str', choices=['aes', 'des']), - authkey=dict(type='str', no_log=True), - privkey=dict(type='str', no_log=True), - timeout=dict(type='int'), - retries=dict(type='int'), - ), - required_together=( - ['username', 'level', 'integrity', 'authkey'], - ['privacy', 'privkey'], - ), - supports_check_mode=True, - ) - - m_args = module.params - - if not HAS_PYSNMP: - module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR) - - cmdGen = cmdgen.CommandGenerator() - transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None) - - # Verify that we receive a community when using snmp v2 - if m_args['version'] in ("v2", "v2c"): - if m_args['community'] is None: - module.fail_json(msg='Community not set when using snmp version 2') - - if m_args['version'] == "v3": - if m_args['username'] is None: - module.fail_json(msg='Username not set when using snmp version 3') - - if m_args['level'] == "authPriv" and m_args['privacy'] is None: - module.fail_json(msg='Privacy algorithm not set when using authPriv') - - if m_args['integrity'] == "sha": - integrity_proto = cmdgen.usmHMACSHAAuthProtocol - elif m_args['integrity'] == "md5": - integrity_proto = cmdgen.usmHMACMD5AuthProtocol - - if m_args['privacy'] == "aes": - privacy_proto = cmdgen.usmAesCfb128Protocol - elif m_args['privacy'] == "des": - privacy_proto = cmdgen.usmDESPrivProtocol - - # Use SNMP Version 2 - if m_args['version'] in ("v2", "v2c"): - snmp_auth = 
cmdgen.CommunityData(m_args['community']) - - # Use SNMP Version 3 with authNoPriv - elif m_args['level'] == "authNoPriv": - snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto) - - # Use SNMP Version 3 with authPriv - else: - snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, - privProtocol=privacy_proto) - - # Use p to prefix OIDs with a dot for polling - p = DefineOid(dotprefix=True) - # Use v without a prefix to use with return values - v = DefineOid(dotprefix=False) - - def Tree(): - return defaultdict(Tree) - - results = Tree() - - errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( - snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), - cmdgen.MibVariable(p.sysDescr,), - cmdgen.MibVariable(p.sysObjectId,), - cmdgen.MibVariable(p.sysUpTime,), - cmdgen.MibVariable(p.sysContact,), - cmdgen.MibVariable(p.sysName,), - cmdgen.MibVariable(p.sysLocation,), - lookupMib=False - ) - - if errorIndication: - module.fail_json(msg=str(errorIndication)) - - for oid, val in varBinds: - current_oid = oid.prettyPrint() - current_val = val.prettyPrint() - if current_oid == v.sysDescr: - results['ansible_sysdescr'] = decode_hex(current_val) - elif current_oid == v.sysObjectId: - results['ansible_sysobjectid'] = current_val - elif current_oid == v.sysUpTime: - results['ansible_sysuptime'] = current_val - elif current_oid == v.sysContact: - results['ansible_syscontact'] = current_val - elif current_oid == v.sysName: - results['ansible_sysname'] = current_val - elif current_oid == v.sysLocation: - results['ansible_syslocation'] = current_val - - errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( - snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), - cmdgen.MibVariable(p.ifIndex,), - cmdgen.MibVariable(p.ifDescr,), - cmdgen.MibVariable(p.ifMtu,), - cmdgen.MibVariable(p.ifSpeed,), - cmdgen.MibVariable(p.ifPhysAddress,), - cmdgen.MibVariable(p.ifAdminStatus,), - cmdgen.MibVariable(p.ifOperStatus,), - cmdgen.MibVariable(p.ipAdEntAddr,), - cmdgen.MibVariable(p.ipAdEntIfIndex,), - cmdgen.MibVariable(p.ipAdEntNetMask,), - - cmdgen.MibVariable(p.ifAlias,), - lookupMib=False - ) - - if errorIndication: - module.fail_json(msg=str(errorIndication)) - - interface_indexes = [] - - all_ipv4_addresses = [] - ipv4_networks = Tree() - - for varBinds in varTable: - for oid, val in varBinds: - if isinstance(val, EndOfMibView): - continue - current_oid = oid.prettyPrint() - current_val = val.prettyPrint() - if v.ifIndex in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['ifindex'] = current_val - interface_indexes.append(ifIndex) - if v.ifDescr in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['name'] = current_val - if v.ifMtu in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['mtu'] = current_val - if v.ifSpeed in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['speed'] = current_val - if v.ifPhysAddress in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val) - if v.ifAdminStatus in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['adminstatus'] = 
lookup_adminstatus(int(current_val)) - if v.ifOperStatus in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val)) - if v.ipAdEntAddr in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] - curIP = ".".join(curIPList) - ipv4_networks[curIP]['address'] = current_val - all_ipv4_addresses.append(current_val) - if v.ipAdEntIfIndex in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] - curIP = ".".join(curIPList) - ipv4_networks[curIP]['interface'] = current_val - if v.ipAdEntNetMask in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] - curIP = ".".join(curIPList) - ipv4_networks[curIP]['netmask'] = current_val - - if v.ifAlias in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['description'] = current_val - - interface_to_ipv4 = {} - for ipv4_network in ipv4_networks: - current_interface = ipv4_networks[ipv4_network]['interface'] - current_network = { - 'address': ipv4_networks[ipv4_network]['address'], - 'netmask': ipv4_networks[ipv4_network]['netmask'] - } - if current_interface not in interface_to_ipv4: - interface_to_ipv4[current_interface] = [] - interface_to_ipv4[current_interface].append(current_network) - else: - interface_to_ipv4[current_interface].append(current_network) - - for interface in interface_to_ipv4: - results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface] - - results['ansible_all_ipv4_addresses'] = all_ipv4_addresses - - module.exit_json(ansible_facts=results) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/netcup_dns.py b/ansible_collections/community/general/plugins/modules/netcup_dns.py deleted file mode 120000 index 66da4493..00000000 --- a/ansible_collections/community/general/plugins/modules/netcup_dns.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/netcup_dns.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/newrelic_deployment.py b/ansible_collections/community/general/plugins/modules/newrelic_deployment.py deleted file mode 120000 index b91ab34b..00000000 --- a/ansible_collections/community/general/plugins/modules/newrelic_deployment.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/newrelic_deployment.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/nexmo.py b/ansible_collections/community/general/plugins/modules/nexmo.py deleted file mode 120000 index a59c1440..00000000 --- a/ansible_collections/community/general/plugins/modules/nexmo.py +++ /dev/null @@ -1 +0,0 @@ -notification/nexmo.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/nginx_status_info.py b/ansible_collections/community/general/plugins/modules/nginx_status_info.py deleted file mode 120000 index 5e9185ae..00000000 --- a/ansible_collections/community/general/plugins/modules/nginx_status_info.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/nginx_status_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/nictagadm.py b/ansible_collections/community/general/plugins/modules/nictagadm.py deleted file mode 120000 index 94e497b8..00000000 --- a/ansible_collections/community/general/plugins/modules/nictagadm.py +++ /dev/null @@ -1 +0,0 @@ -cloud/smartos/nictagadm.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/nmcli.py b/ansible_collections/community/general/plugins/modules/nmcli.py deleted file mode 120000 index 6c2c1534..00000000 --- a/ansible_collections/community/general/plugins/modules/nmcli.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/nmcli.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/nomad_job.py b/ansible_collections/community/general/plugins/modules/nomad_job.py deleted file mode 120000 index 763b37d1..00000000 --- a/ansible_collections/community/general/plugins/modules/nomad_job.py +++ /dev/null @@ -1 +0,0 @@ -clustering/nomad/nomad_job.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/nomad_job_info.py b/ansible_collections/community/general/plugins/modules/nomad_job_info.py deleted file mode 120000 index 9749646d..00000000 --- a/ansible_collections/community/general/plugins/modules/nomad_job_info.py +++ /dev/null @@ -1 +0,0 @@ -clustering/nomad/nomad_job_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/nosh.py b/ansible_collections/community/general/plugins/modules/nosh.py deleted file mode 120000 index f1401ea8..00000000 --- a/ansible_collections/community/general/plugins/modules/nosh.py +++ /dev/null @@ -1 +0,0 @@ -system/nosh.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/notification/bearychat.py b/ansible_collections/community/general/plugins/modules/notification/bearychat.py deleted file mode 100644 index 4c907ea6..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/bearychat.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Jiangge Zhang -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: bearychat -short_description: Send BearyChat notifications -description: - - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com) - via the Incoming Robot integration. -author: "Jiangge Zhang (@tonyseek)" -options: - url: - type: str - description: - - BearyChat WebHook URL. This authenticates you to the bearychat - service. It looks like - C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60). - required: true - text: - type: str - description: - - Message to send. - markdown: - description: - - If C(yes), text will be parsed as markdown. - default: 'yes' - type: bool - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the - default channel selected by the I(url). - attachments: - type: list - elements: dict - description: - - Define a list of attachments. 
For more information, see - https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments -''' - -EXAMPLES = """ -- name: Send notification message via BearyChat - local_action: - module: bearychat - url: | - https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 - text: "{{ inventory_hostname }} completed" - -- name: Send notification message via BearyChat all options - local_action: - module: bearychat - url: | - https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 - text: "{{ inventory_hostname }} completed" - markdown: no - channel: "#ansible" - attachments: - - title: "Ansible on {{ inventory_hostname }}" - text: "May the Force be with you." - color: "#ffffff" - images: - - http://example.com/index.png -""" - -RETURN = """ -msg: - description: execution result - returned: success - type: str - sample: "OK" -""" - -try: - from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse - HAS_URLPARSE = True -except Exception: - HAS_URLPARSE = False -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def build_payload_for_bearychat(module, text, markdown, channel, attachments): - payload = {} - if text is not None: - payload['text'] = text - if markdown is not None: - payload['markdown'] = markdown - if channel is not None: - payload['channel'] = channel - if attachments is not None: - payload.setdefault('attachments', []).extend( - build_payload_for_bearychat_attachment( - module, item.get('title'), item.get('text'), item.get('color'), - item.get('images')) - for item in attachments) - payload = 'payload=%s' % module.jsonify(payload) - return payload - - -def build_payload_for_bearychat_attachment(module, title, text, color, images): - attachment = {} - if title is not None: - attachment['title'] = title - if text is not None: - attachment['text'] = text - if color is not None: - attachment['color'] = color - if images is not None: - target_images = attachment.setdefault('images', []) - if not isinstance(images, (list, tuple)): - images = [images] - for image in images: - if isinstance(image, dict) and 'url' in image: - image = {'url': image['url']} - elif hasattr(image, 'startswith') and image.startswith('http'): - image = {'url': image} - else: - module.fail_json( - msg="BearyChat doesn't have support for this kind of " - "attachment image") - target_images.append(image) - return attachment - - -def do_notify_bearychat(module, url, payload): - response, info = fetch_url(module, url, data=payload) - if info['status'] != 200: - url_info = urlparse(url) - obscured_incoming_webhook = urlunparse( - (url_info.scheme, url_info.netloc, '[obscured]', '', '', '')) - module.fail_json( - msg=" failed to send %s to %s: %s" % ( - payload, obscured_incoming_webhook, info['msg'])) - - -def main(): - module = AnsibleModule(argument_spec={ - 'url': dict(type='str', required=True, no_log=True), - 'text': dict(type='str'), - 'markdown': dict(default=True, type='bool'), - 'channel': dict(type='str'), - 'attachments': dict(type='list', elements='dict'), - }) - - if not HAS_URLPARSE: - module.fail_json(msg='urlparse is not installed') - - url = module.params['url'] - text = module.params['text'] - markdown = module.params['markdown'] - channel = module.params['channel'] - attachments = module.params['attachments'] - - payload = build_payload_for_bearychat( - module, text, markdown, channel, attachments) - do_notify_bearychat(module, url, payload) - - 
module.exit_json(msg="OK") - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/campfire.py b/ansible_collections/community/general/plugins/modules/notification/campfire.py deleted file mode 100644 index c6848238..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/campfire.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: campfire -short_description: Send a message to Campfire -description: - - Send a message to Campfire. - - Messages with newlines will result in a "Paste" message being sent. -options: - subscription: - type: str - description: - - The subscription name to use. - required: true - token: - type: str - description: - - API token. - required: true - room: - type: str - description: - - Room number to which the message should be sent. - required: true - msg: - type: str - description: - - The message body. - required: true - notify: - type: str - description: - - Send a notification sound before the message. - required: false - choices: ["56k", "bell", "bezos", "bueller", "clowntown", - "cottoneyejoe", "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", "greatjob", "greyjoy", - "guarantee", "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", "makeitso", "noooo", - "nyan", "ohmy", "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", "sexyback", - "story", "tada", "tmyk", "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", "yodel"] - -# informational: requirements for nodes -requirements: [ ] -author: "Adam Garside (@fabulops)" -''' - -EXAMPLES = ''' -- name: Send a message to Campfire - community.general.campfire: - subscription: foo - token: 12345 - room: 123 - msg: Task completed. - -- name: Send a message to Campfire - community.general.campfire: - subscription: foo - token: 12345 - room: 123 - notify: loggins - msg: Task completed ... with feeling. 
-''' - -try: - from html import escape as html_escape -except ImportError: - # Fallback for Python < 3.2, where html.escape() is unavailable - import cgi - - def html_escape(text, quote=True): - return cgi.escape(text, quote) - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - subscription=dict(required=True), - token=dict(required=True, no_log=True), - room=dict(required=True), - msg=dict(required=True), - notify=dict(required=False, - choices=["56k", "bell", "bezos", "bueller", - "clowntown", "cottoneyejoe", - "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", - "greatjob", "greyjoy", "guarantee", - "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", - "makeitso", "noooo", "nyan", "ohmy", - "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", - "sexyback", "story", "tada", "tmyk", - "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", - "yodel"]), - ), - supports_check_mode=False - ) - - subscription = module.params["subscription"] - token = module.params["token"] - room = module.params["room"] - msg = module.params["msg"] - notify = module.params["notify"] - - URI = "https://%s.campfirenow.com" % subscription - NSTR = "<message><type>SoundMessage</type><body>%s</body></message>" - MSTR = "<message><body>%s</body></message>" - AGENT = "Ansible/1.2" - - # Hack to add basic auth username and password the way fetch_url expects - module.params['url_username'] = token - module.params['url_password'] = 'X' - - target_url = '%s/room/%s/speak.xml' % (URI, room) - headers = {'Content-Type': 'application/xml', - 'User-agent': AGENT} - - # Send some audible notification if requested - if notify: - response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers) - if info['status'] not in [200, 201]: - module.fail_json(msg="unable to send msg: '%s', campfire api" - " returned error code: '%s'" % - (notify, info['status'])) - - # Send the message - response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers) - if info['status'] not in [200, 201]: - module.fail_json(msg="unable to send msg: '%s', campfire api" - " returned error code: '%s'" % - (msg, info['status'])) - - module.exit_json(changed=True, room=room, msg=msg, notify=notify) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/catapult.py b/ansible_collections/community/general/plugins/modules/notification/catapult.py deleted file mode 100644 index 13833620..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/catapult.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Jonathan Mainguy -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# basis of code taken from the ansible twilio and nexmo modules - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: catapult -short_description: Send an SMS / MMS using the Catapult Bandwidth API -description: - - Allows notifications to be sent using SMS / MMS via the Catapult Bandwidth API. -options: - src: - type: str - description: - - One of your Catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
- required: true - dest: - type: list - elements: str - description: - - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)). - required: true - msg: - type: str - description: - - The contents of the text message (must be 2048 characters or less). - required: true - media: - type: str - description: - - For MMS messages, a media url to the location of the media to be sent with the message. - user_id: - type: str - description: - - User Id from Api account page. - required: true - api_token: - type: str - description: - - Api Token from Api account page. - required: true - api_secret: - type: str - description: - - Api Secret from Api account page. - required: true - -author: "Jonathan Mainguy (@Jmainguy)" -notes: - - Will return changed even if the media url is wrong. - - Will return changed if the destination number is invalid. - -''' - -EXAMPLES = ''' -- name: Send a mms to multiple users - community.general.catapult: - src: "+15035555555" - dest: - - "+12525089000" - - "+12018994225" - media: "http://example.com/foobar.jpg" - msg: "Task is complete" - user_id: "{{ user_id }}" - api_token: "{{ api_token }}" - api_secret: "{{ api_secret }}" - -- name: Send a sms to a single user - community.general.catapult: - src: "+15035555555" - dest: "+12018994225" - msg: "Consider yourself notified" - user_id: "{{ user_id }}" - api_token: "{{ api_token }}" - api_secret: "{{ api_secret }}" - -''' - -RETURN = ''' -changed: - description: Whether the api accepted the message. - returned: always - type: bool - sample: True -''' - - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def send(module, src, dest, msg, media, user_id, api_token, api_secret): - """ - Send the message - """ - AGENT = "Ansible" - URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id - data = {'from': src, 'to': dest, 'text': msg} - if media: - data['media'] = media - - headers = {'User-Agent': AGENT, 'Content-type': 'application/json'} - - # Hack module params to have the Basic auth params that fetch_url expects - module.params['url_username'] = api_token.replace('\n', '') - module.params['url_password'] = api_secret.replace('\n', '') - - return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - src=dict(required=True), - dest=dict(required=True, type='list', elements='str'), - msg=dict(required=True), - user_id=dict(required=True), - api_token=dict(required=True, no_log=True), - api_secret=dict(required=True, no_log=True), - media=dict(default=None, required=False), - ), - ) - - src = module.params['src'] - dest = module.params['dest'] - msg = module.params['msg'] - media = module.params['media'] - user_id = module.params['user_id'] - api_token = module.params['api_token'] - api_secret = module.params['api_secret'] - - for number in dest: - rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret) - if info["status"] != 201: - body = json.loads(info["body"]) - fail_msg = body["message"] - module.fail_json(msg=fail_msg) - - changed = True - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py b/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py deleted file mode 120000 index 6fe1011f..00000000 --- 
a/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py
+++ /dev/null
@@ -1 +0,0 @@
-cisco_webex.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py b/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py
deleted file mode 100644
index 8c1361fb..00000000
--- a/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: cisco_webex
-short_description: Send a message to a Cisco Webex Teams Room or Individual
-description:
-  - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
-author: Drew Russell (@drew-russell)
-notes:
-  - The C(recipient_type) must be valid for the supplied C(recipient_id).
-  - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
-
-options:
-
-  recipient_type:
-    description:
-      - The request parameter you would like to send the message to.
-      - Messages can be sent to either a room or individual (by ID or E-Mail).
-    required: yes
-    choices: ['roomId', 'toPersonEmail', 'toPersonId']
-    type: str
-
-  recipient_id:
-    description:
-      - The unique identifier associated with the supplied C(recipient_type).
-    required: yes
-    type: str
-
-  msg_type:
-    description:
-      - Specifies how you would like the message formatted.
-    default: text
-    choices: ['text', 'markdown']
-    type: str
-    aliases: ['message_type']
-
-  personal_token:
-    description:
-      - Your personal access token required to validate the Webex Teams API.
-    required: yes
-    aliases: ['token']
-    type: str
-
-  msg:
-    description:
-      - The message you would like to send.
-    required: yes
-    type: str
-'''
-
-EXAMPLES = """
-# Note: The following examples assume a variable file has been imported
-# that contains the appropriate information.
-
-- name: Cisco Webex Teams - Markdown Message to a Room
-  community.general.cisco_webex:
-    recipient_type: roomId
-    recipient_id: "{{ room_id }}"
-    msg_type: markdown
-    personal_token: "{{ token }}"
-    msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
-
-- name: Cisco Webex Teams - Text Message to a Room
-  community.general.cisco_webex:
-    recipient_type: roomId
-    recipient_id: "{{ room_id }}"
-    msg_type: text
-    personal_token: "{{ token }}"
-    msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
-
-- name: Cisco Webex Teams - Text Message by an Individual's ID
-  community.general.cisco_webex:
-    recipient_type: toPersonId
-    recipient_id: "{{ person_id }}"
-    msg_type: text
-    personal_token: "{{ token }}"
-    msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
-
-- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
-  community.general.cisco_webex:
-    recipient_type: toPersonEmail
-    recipient_id: "{{ person_email }}"
-    msg_type: text
-    personal_token: "{{ token }}"
-    msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
-
-"""
-
-RETURN = """
-status_code:
-  description:
-    - The Response Code returned by the Webex Teams API.
-    - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
- returned: always - type: int - sample: 200 - -message: - description: - - The Response Message returned by the Webex Teams API. - - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). - returned: always - type: str - sample: OK (585 bytes) -""" -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def webex_msg(module): - """When check mode is specified, establish a read only connection, that does not return any user specific - data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual""" - - # Ansible Specific Variables - results = {} - ansible = module.params - - headers = { - 'Authorization': 'Bearer {0}'.format(ansible['personal_token']), - 'content-type': 'application/json' - } - - if module.check_mode: - url = "https://webexapis.com/v1/people/me" - payload = None - - else: - url = "https://webexapis.com/v1/messages" - - payload = { - ansible['recipient_type']: ansible['recipient_id'], - ansible['msg_type']: ansible['msg'] - } - - payload = module.jsonify(payload) - - response, info = fetch_url(module, url, data=payload, headers=headers) - - status_code = info['status'] - msg = info['msg'] - - # Module will fail if the response is not 200 - if status_code != 200: - results['failed'] = True - results['status_code'] = status_code - results['message'] = msg - else: - results['failed'] = False - results['status_code'] = status_code - - if module.check_mode: - results['message'] = 'Authentication Successful.' - else: - results['message'] = msg - - return results - - -def main(): - '''Ansible main. ''' - module = AnsibleModule( - argument_spec=dict( - recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']), - recipient_id=dict(required=True, no_log=True), - msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']), - personal_token=dict(required=True, no_log=True, aliases=['token']), - msg=dict(required=True), - ), - - supports_check_mode=True - ) - - results = webex_msg(module) - - module.exit_json(**results) - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/discord.py b/ansible_collections/community/general/plugins/modules/notification/discord.py deleted file mode 100644 index 27dc6fc8..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/discord.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Christian Wollinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: discord -short_description: Send Discord messages -version_added: 3.1.0 -description: - - Sends a message to a Discord channel using the Discord webhook API. -author: Christian Wollinger (@cwollinger) -seealso: - - name: API documentation - description: Documentation for Discord API - link: https://discord.com/developers/docs/resources/webhook#execute-webhook -options: - webhook_id: - description: - - The webhook ID. - - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." - required: yes - type: str - webhook_token: - description: - - The webhook token. - - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." 
-    required: yes
-    type: str
-  content:
-    description:
-      - Content of the message to the Discord channel.
-      - At least one of I(content) and I(embeds) must be specified.
    type: str
-  username:
-    description:
-      - Overrides the default username of the webhook.
-    type: str
-  avatar_url:
-    description:
-      - Overrides the default avatar of the webhook.
-    type: str
-  tts:
-    description:
-      - Set this to C(true) if this is a TTS (Text to Speech) message.
-    type: bool
-    default: false
-  embeds:
-    description:
-      - Send messages as Embeds to the Discord channel.
-      - Embeds can have a colored border, embedded images, text fields and more.
-      - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)"
-      - At least one of I(content) and I(embeds) must be specified.
-    type: list
-    elements: dict
-'''
-
-EXAMPLES = """
-- name: Send a message to the Discord channel
-  community.general.discord:
-    webhook_id: "00000"
-    webhook_token: "XXXYYY"
-    content: "This is a message from ansible"
-
-- name: Send a message to the Discord channel with specific username and avatar
-  community.general.discord:
-    webhook_id: "00000"
-    webhook_token: "XXXYYY"
-    content: "This is a message from ansible"
-    username: Ansible
-    avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
-
-- name: Send an embedded message to the Discord channel
-  community.general.discord:
-    webhook_id: "00000"
-    webhook_token: "XXXYYY"
-    embeds:
-      - title: "Embedded message"
-        description: "This is an embedded message"
-        footer:
-          text: "Author: Ansible"
-        image:
-          url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
-
-- name: Send two embedded messages
-  community.general.discord:
-    webhook_id: "00000"
-    webhook_token: "XXXYYY"
-    embeds:
-      - title: "First message"
-        description: "This is my first embedded message"
-        footer:
-          text: "Author: Ansible"
-        image:
-          url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
-      - title: "Second message"
-        description: "This is my second embedded message"
-        footer:
-          text: "Author: Ansible"
-          icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
-        fields:
-          - name: "Field 1"
-            value: "Value of my first field"
-          - name: "Field 2"
-            value: "Value of my second field"
-        timestamp: "{{ ansible_date_time.iso8601 }}"
-"""
-
-RETURN = """
-http_code:
-  description:
-    - Response Code returned by Discord API.
- returned: always - type: int - sample: 204 -""" - -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.basic import AnsibleModule - - -def discord_check_mode(module): - - webhook_id = module.params['webhook_id'] - webhook_token = module.params['webhook_token'] - - headers = { - 'content-type': 'application/json' - } - - url = "https://discord.com/api/webhooks/%s/%s" % ( - webhook_id, webhook_token) - - response, info = fetch_url(module, url, method='GET', headers=headers) - return response, info - - -def discord_text_msg(module): - - webhook_id = module.params['webhook_id'] - webhook_token = module.params['webhook_token'] - content = module.params['content'] - user = module.params['username'] - avatar_url = module.params['avatar_url'] - tts = module.params['tts'] - embeds = module.params['embeds'] - - headers = { - 'content-type': 'application/json' - } - - url = "https://discord.com/api/webhooks/%s/%s" % ( - webhook_id, webhook_token) - - payload = { - 'content': content, - 'username': user, - 'avatar_url': avatar_url, - 'tts': tts, - 'embeds': embeds, - } - - payload = module.jsonify(payload) - - response, info = fetch_url(module, url, data=payload, headers=headers, method='POST') - return response, info - - -def main(): - module = AnsibleModule( - argument_spec=dict( - webhook_id=dict(type='str', required=True), - webhook_token=dict(type='str', required=True, no_log=True), - content=dict(type='str'), - username=dict(type='str'), - avatar_url=dict(type='str'), - tts=dict(type='bool', default=False), - embeds=dict(type='list', elements='dict'), - ), - required_one_of=[['content', 'embeds']], - supports_check_mode=True - ) - - result = dict( - changed=False, - http_code='', - ) - - if module.check_mode: - response, info = discord_check_mode(module) - if info['status'] != 200: - try: - module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) - except Exception: - module.fail_json(http_code=info['status'], msg=info['msg'], info=info) - else: - module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read())) - else: - response, info = discord_text_msg(module) - if info['status'] != 204: - try: - module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) - except Exception: - module.fail_json(http_code=info['status'], msg=info['msg'], info=info) - else: - module.exit_json(msg=info['msg'], changed=True, http_code=info['status']) - - -if __name__ == "__main__": - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/flowdock.py b/ansible_collections/community/general/plugins/modules/notification/flowdock.py deleted file mode 100644 index a1842c5d..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/flowdock.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: flowdock -author: "Matt Coddington (@mcodd)" -short_description: Send a message to a flowdock -description: - - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) -options: - token: - type: str - description: - - API 
token. - required: true - type: - type: str - description: - - Whether to post to 'inbox' or 'chat' - required: true - choices: [ "inbox", "chat" ] - msg: - type: str - description: - - Content of the message - required: true - tags: - type: str - description: - - tags of the message, separated by commas - required: false - external_user_name: - type: str - description: - - (chat only - required) Name of the "user" sending the message - required: false - from_address: - type: str - description: - - (inbox only - required) Email address of the message sender - required: false - source: - type: str - description: - - (inbox only - required) Human readable identifier of the application that uses the Flowdock API - required: false - subject: - type: str - description: - - (inbox only - required) Subject line of the message - required: false - from_name: - type: str - description: - - (inbox only) Name of the message sender - required: false - reply_to: - type: str - description: - - (inbox only) Email address for replies - required: false - project: - type: str - description: - - (inbox only) Human readable identifier for more detailed message categorization - required: false - link: - type: str - description: - - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - type: bool - -requirements: [ ] -''' - -EXAMPLES = ''' -- name: Send a message to a flowdock - community.general.flowdock: - type: inbox - token: AAAAAA - from_address: user@example.com - source: my cool app - msg: test from ansible - subject: test subject - -- name: Send a message to a flowdock - community.general.flowdock: - type: chat - token: AAAAAA - external_user_name: testuser - msg: test from ansible - tags: tag1,tag2,tag3 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - msg=dict(required=True), - type=dict(required=True, choices=["inbox", "chat"]), - external_user_name=dict(required=False), - from_address=dict(required=False), - source=dict(required=False), - subject=dict(required=False), - from_name=dict(required=False), - reply_to=dict(required=False), - project=dict(required=False), - tags=dict(required=False), - link=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - type = module.params["type"] - token = module.params["token"] - if type == 'inbox': - url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) - else: - url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) - - params = {} - - # required params - params['content'] = module.params["msg"] - - # required params for the 'chat' type - if module.params['external_user_name']: - if type == 'inbox': - module.fail_json(msg="external_user_name is not valid for the 'inbox' type") - else: - params['external_user_name'] = module.params["external_user_name"] - elif type == 'chat': - module.fail_json(msg="external_user_name is required for the 'chat' type") - - # required params for the 'inbox' type - for item in ['from_address', 'source', 'subject']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - elif type == 'inbox': - module.fail_json(msg="%s is required for the 'inbox' type" % item) - - # optional params - if module.params["tags"]: - params['tags'] = module.params["tags"] - - # optional params for the 'inbox' type - for item in ['from_name', 'reply_to', 'project', 'link']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=False) - - # Send the data to Flowdock - data = urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] != 200: - module.fail_json(msg="unable to send msg: %s" % info['msg']) - - module.exit_json(changed=True, msg=module.params["msg"]) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/grove.py b/ansible_collections/community/general/plugins/modules/notification/grove.py deleted file mode 100644 index 12c91090..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/grove.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: grove -short_description: Sends a notification to a grove.io channel -description: - - The C(grove) module sends a message for a service to a Grove.io - channel. -options: - channel_token: - type: str - description: - - Token of the channel to post to. - required: true - service: - type: str - description: - - Name of the service (displayed as the "user" in the message) - required: false - default: ansible - message_content: - type: str - description: - - Message content. 
-      - The alias I(message) was removed in community.general 4.0.0.
-    required: true
-  url:
-    type: str
-    description:
-      - Service URL for the web client
-    required: false
-  icon_url:
-    type: str
-    description:
-      - Icon for the service
-    required: false
-  validate_certs:
-    description:
-      - If C(no), SSL certificates will not be validated. This should only be used
-        on personally controlled sites using self-signed certificates.
-    default: 'yes'
-    type: bool
-author: "Jonas Pfenniger (@zimbatm)"
-'''
-
-EXAMPLES = '''
-- name: Sends a notification to a grove.io channel
-  community.general.grove:
-    channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
-    service: my-app
-    message_content: 'deployed {{ target }}'
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import fetch_url
-
-
-BASE_URL = 'https://grove.io/api/notice/%s/'
-
-# ==============================================================
-# do_notify_grove
-
-
-def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
-    my_url = BASE_URL % (channel_token,)
-
-    my_data = dict(service=service, message=message)
-    if url is not None:
-        my_data['url'] = url
-    if icon_url is not None:
-        my_data['icon_url'] = icon_url
-
-    data = urlencode(my_data)
-    response, info = fetch_url(module, my_url, data=data)
-    if info['status'] != 200:
-        module.fail_json(msg="failed to send notification: %s" % info['msg'])
-
-# ==============================================================
-# main
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            channel_token=dict(type='str', required=True, no_log=True),
-            message_content=dict(type='str', required=True),
-            service=dict(type='str', default='ansible'),
-            url=dict(type='str', default=None),
-            icon_url=dict(type='str', default=None),
-            validate_certs=dict(default=True, type='bool'),
-        )
-    )
-
-    channel_token = module.params['channel_token']
-    service = module.params['service']
-    message = module.params['message_content']
-    url = module.params['url']
-    icon_url = module.params['icon_url']
-
-    do_notify_grove(module, channel_token, service, message, url, icon_url)
-
-    # Mission complete
-    module.exit_json(msg="OK")
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/notification/hipchat.py b/ansible_collections/community/general/plugins/modules/notification/hipchat.py
deleted file mode 100644
index 76c1227a..00000000
--- a/ansible_collections/community/general/plugins/modules/notification/hipchat.py
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: hipchat
-short_description: Send a message to Hipchat.
-description:
-  - Send a message to a Hipchat room, with options to control the formatting.
-options:
-  token:
-    type: str
-    description:
-      - API token.
-    required: true
-  room:
-    type: str
-    description:
-      - ID or name of the room.
-    required: true
-  msg_from:
-    type: str
-    description:
-      - Name the message will appear to be sent from. Max length is 15
-        characters - above this it will be truncated.
-    default: Ansible
-    aliases: [from]
-  msg:
-    type: str
-    description:
-      - The message body.
- required: true - color: - type: str - description: - - Background color for the message. - default: yellow - choices: [ "yellow", "red", "green", "purple", "gray", "random" ] - msg_format: - type: str - description: - - Message format. - default: text - choices: [ "text", "html" ] - notify: - description: - - If true, a notification will be triggered for users in the room. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - api: - type: str - description: - - API url if using a self-hosted hipchat server. For Hipchat API version - 2 use the default URI with C(/v2) instead of C(/v1). - default: 'https://api.hipchat.com/v1' - -author: -- Shirou Wakayama (@shirou) -- Paul Bourdel (@pb8226) -''' - -EXAMPLES = ''' -- name: Send a message to a Hipchat room - community.general.hipchat: - room: notif - msg: Ansible task finished - -- name: Send a message to a Hipchat room using Hipchat API version 2 - community.general.hipchat: - api: https://api.hipchat.com/v2/ - token: OAUTH2_TOKEN - room: notify - msg: Ansible task finished -''' - -# =========================================== -# HipChat module specific support methods. -# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -DEFAULT_URI = "https://api.hipchat.com/v1" - -MSG_URI_V1 = "/rooms/message" - -NOTIFY_URI_V2 = "/room/{id_or_name}/notification" - - -def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI_V1): - '''sending message to hipchat v1 server''' - - params = {} - params['room_id'] = room - params['from'] = msg_from[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['api'] = api - params['notify'] = int(notify) - - url = api + MSG_URI_V1 + "?auth_token=%s" % (token) - data = urlencode(params) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=NOTIFY_URI_V2): - '''sending message to hipchat v2 server''' - - headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} - - body = dict() - body['message'] = msg - body['color'] = color - body['message_format'] = msg_format - body['notify'] = notify - - POST_URL = api + NOTIFY_URI_V2 - - url = POST_URL.replace('{id_or_name}', pathname2url(room)) - data = json.dumps(body) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data, headers=headers, method='POST') - - # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows - # 204 to be the expected result code. 
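-    # Accept 200 as well as the documented 204, so a plain OK response also counts as success.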
- if info['status'] in [200, 204]: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - room=dict(required=True), - msg=dict(required=True), - msg_from=dict(default="Ansible", aliases=['from']), - color=dict(default="yellow", choices=["yellow", "red", "green", - "purple", "gray", "random"]), - msg_format=dict(default="text", choices=["text", "html"]), - notify=dict(default=True, type='bool'), - validate_certs=dict(default=True, type='bool'), - api=dict(default=DEFAULT_URI), - ), - supports_check_mode=True - ) - - token = module.params["token"] - room = str(module.params["room"]) - msg = module.params["msg"] - msg_from = module.params["msg_from"] - color = module.params["color"] - msg_format = module.params["msg_format"] - notify = module.params["notify"] - api = module.params["api"] - - try: - if api.find('/v2') != -1: - send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) - else: - send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception as e: - module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) - - changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/irc.py b/ansible_collections/community/general/plugins/modules/notification/irc.py deleted file mode 100644 index 9b1b91f5..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/irc.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jan-Piet Mens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: irc -short_description: Send a message to an IRC channel or a nick -description: - - Send a message to an IRC channel or a nick. This is a very simplistic implementation. -options: - server: - type: str - description: - - IRC server name/address - default: localhost - port: - type: int - description: - - IRC server port number - default: 6667 - nick: - type: str - description: - - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting. - default: ansible - msg: - type: str - description: - - The message body. - required: true - topic: - type: str - description: - - Set the channel topic - color: - type: str - description: - - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). - Added 11 more colors in version 2.0. - default: "none" - choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan", - "light_blue", "pink", "gray", "light_gray"] - aliases: [colour] - channel: - type: str - description: - - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. - nick_to: - type: list - elements: str - description: - - A list of nicknames to send the message to. One of nick_to or channel needs to be set. 
When both are defined, the message will be sent to both of them.
-  key:
-    type: str
-    description:
-      - Channel key
-  passwd:
-    type: str
-    description:
-      - Server password
-  timeout:
-    type: int
-    description:
-      - Timeout to use while waiting for successful registration and join
-        messages, this is to prevent an endless loop
-    default: 30
-  use_ssl:
-    description:
-      - Designates whether TLS/SSL should be used when connecting to the IRC server
-    type: bool
-    default: 'no'
-  part:
-    description:
-      - Designates whether user should part from channel after sending message or not.
-        Useful for when using a faux bot and not wanting join/parts between messages.
-    type: bool
-    default: 'yes'
-  style:
-    type: str
-    description:
-      - Text style for the message. Note italic does not work on some clients
-    choices: [ "bold", "underline", "reverse", "italic", "none" ]
-    default: none
-
-# informational: requirements for nodes
-requirements: [ socket ]
-author:
-  - "Jan-Piet Mens (@jpmens)"
-  - "Matt Martz (@sivel)"
-'''
-
-EXAMPLES = '''
-- name: Send a message to an IRC channel from nick ansible
-  community.general.irc:
-    server: irc.example.net
-    channel: '#t1'
-    msg: Hello world
-
-- name: Send a message to an IRC channel
-  local_action:
-    module: irc
-    port: 6669
-    server: irc.example.net
-    channel: '#t1'
-    msg: 'All finished at {{ ansible_date_time.iso8601 }}'
-    color: red
-    nick: ansibleIRC
-
-- name: Send a message to an IRC channel
-  local_action:
-    module: irc
-    port: 6669
-    server: irc.example.net
-    channel: '#t1'
-    nick_to:
-      - nick1
-      - nick2
-    msg: 'All finished at {{ ansible_date_time.iso8601 }}'
-    color: red
-    nick: ansibleIRC
-'''
-
-# ===========================================
-# IRC module support methods.
-#
-
-import re
-import socket
-import ssl
-import time
-import traceback
-
-from ansible.module_utils.common.text.converters import to_native, to_bytes
-from ansible.module_utils.basic import AnsibleModule
-
-
-def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
-             nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
-    '''send message to IRC'''
-    nick_to = [] if nick_to is None else nick_to
-
-    colornumbers = {
-        'white': "00",
-        'black': "01",
-        'blue': "02",
-        'green': "03",
-        'red': "04",
-        'brown': "05",
-        'purple': "06",
-        'orange': "07",
-        'yellow': "08",
-        'light_green': "09",
-        'teal': "10",
-        'light_cyan': "11",
-        'light_blue': "12",
-        'pink': "13",
-        'gray': "14",
-        'light_gray': "15",
-    }
-
-    stylechoices = {
-        'bold': "\x02",
-        'underline': "\x1F",
-        'reverse': "\x16",
-        'italic': "\x1D",
-    }
-
-    try:
-        styletext = stylechoices[style]
-    except Exception:
-        styletext = ""
-
-    try:
-        colornumber = colornumbers[color]
-        colortext = "\x03" + colornumber
-    except Exception:
-        colortext = ""
-
-    message = styletext + colortext + msg
-
-    irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    if use_ssl:
-        irc = ssl.wrap_socket(irc)
-    irc.connect((server, int(port)))
-
-    if passwd:
-        irc.send(to_bytes('PASS %s\r\n' % passwd))
-    irc.send(to_bytes('NICK %s\r\n' % nick))
-    irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
-    motd = ''
-    start = time.time()
-    while 1:
-        motd += to_native(irc.recv(1024))
-        # The server might send back a shorter nick than we specified (due to NICKLEN),
-        # so grab that and use it from now on (assuming we find the 00[1-4] response).
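-        # Numerics 001-004 (RPL_WELCOME through RPL_MYINFO) are only sent once registration succeeds.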
-        match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
-        if match:
-            nick = match.group('nick')
-            break
-        elif time.time() - start > timeout:
-            raise Exception('Timeout waiting for IRC server welcome response')
-        time.sleep(0.5)
-
-    if channel:
-        if key:
-            irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
-        else:
-            irc.send(to_bytes('JOIN %s\r\n' % channel))
-
-        join = ''
-        start = time.time()
-        while 1:
-            join += to_native(irc.recv(1024))
-            if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
-                break
-            elif time.time() - start > timeout:
-                raise Exception('Timeout waiting for IRC JOIN response')
-            time.sleep(0.5)
-
-        if topic is not None:
-            irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
-            time.sleep(1)
-
-    if nick_to:
-        for nick in nick_to:
-            irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
-    if channel:
-        irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
-    time.sleep(1)
-    if part:
-        if channel:
-            irc.send(to_bytes('PART %s\r\n' % channel))
-        irc.send(to_bytes('QUIT\r\n'))
-        time.sleep(1)
-    irc.close()
-
-# ===========================================
-# Main
-#
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            server=dict(default='localhost'),
-            port=dict(type='int', default=6667),
-            nick=dict(default='ansible'),
-            nick_to=dict(required=False, type='list', elements='str'),
-            msg=dict(required=True),
-            color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
-                                                                    "green", "red", "brown",
-                                                                    "purple", "orange", "yellow",
-                                                                    "light_green", "teal", "light_cyan",
-                                                                    "light_blue", "pink", "gray",
-                                                                    "light_gray", "none"]),
-            style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
-            channel=dict(required=False),
-            key=dict(no_log=True),
-            topic=dict(),
-            passwd=dict(no_log=True),
-            timeout=dict(type='int', default=30),
-            part=dict(type='bool', default=True),
-            use_ssl=dict(type='bool', default=False)
-        ),
-        supports_check_mode=True,
-        required_one_of=[['channel', 'nick_to']]
-    )
-
-    server = module.params["server"]
-    port = module.params["port"]
-    nick = module.params["nick"]
-    nick_to = module.params["nick_to"]
-    msg = module.params["msg"]
-    color = module.params["color"]
-    channel = module.params["channel"]
-    topic = module.params["topic"]
-    if topic and not channel:
-        module.fail_json(msg="When topic is specified, a channel is required.")
-    key = module.params["key"]
-    passwd = module.params["passwd"]
-    timeout = module.params["timeout"]
-    use_ssl = module.params["use_ssl"]
-    part = module.params["part"]
-    style = module.params["style"]
-
-    try:
-        send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
-    except Exception as e:
-        module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
-
-    module.exit_json(changed=False, channel=channel, nick=nick,
-                     msg=msg)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/notification/jabber.py b/ansible_collections/community/general/plugins/modules/notification/jabber.py
deleted file mode 100644
index 9b6811b3..00000000
--- a/ansible_collections/community/general/plugins/modules/notification/jabber.py
+++ /dev/null
@@ -1,166 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, Brian Coca
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: jabber -short_description: Send a message to jabber user or chat room -description: - - Send a message to jabber -options: - user: - type: str - description: - - User as which to connect - required: true - password: - type: str - description: - - password for user to connect - required: true - to: - type: str - description: - - user ID or name of the room, when using room use a slash to indicate your nick. - required: true - msg: - type: str - description: - - The message body. - required: true - host: - type: str - description: - - host to connect, overrides user info - port: - type: int - description: - - port to connect to, overrides default - default: 5222 - encoding: - type: str - description: - - message encoding - -# informational: requirements for nodes -requirements: - - python xmpp (xmpppy) -author: "Brian Coca (@bcoca)" -''' - -EXAMPLES = ''' -- name: Send a message to a user - community.general.jabber: - user: mybot@example.net - password: secret - to: friend@example.net - msg: Ansible task finished - -- name: Send a message to a room - community.general.jabber: - user: mybot@example.net - password: secret - to: mychaps@conference.example.net/ansiblebot - msg: Ansible task finished - -- name: Send a message, specifying the host and port - community.general.jabber: - user: mybot@example.net - host: talk.example.net - port: 5223 - password: secret - to: mychaps@example.net - msg: Ansible task finished -''' - -import time -import traceback - -HAS_XMPP = True -XMPP_IMP_ERR = None -try: - import xmpp -except ImportError: - XMPP_IMP_ERR = traceback.format_exc() - HAS_XMPP = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - user=dict(required=True), - password=dict(required=True, no_log=True), - to=dict(required=True), - msg=dict(required=True), - host=dict(required=False), - port=dict(required=False, default=5222, type='int'), - encoding=dict(required=False), - ), - supports_check_mode=True - ) - - if not HAS_XMPP: - module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR) - - jid = xmpp.JID(module.params['user']) - user = jid.getNode() - server = jid.getDomain() - port = module.params['port'] - password = module.params['password'] - try: - to, nick = module.params['to'].split('/', 1) - except ValueError: - to, nick = module.params['to'], None - - if module.params['host']: - host = module.params['host'] - else: - host = server - if module.params['encoding']: - xmpp.simplexml.ENCODING = module.params['encoding'] - - msg = xmpp.protocol.Message(body=module.params['msg']) - - try: - conn = xmpp.Client(server, debug=[]) - if not conn.connect(server=(host, port)): - module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) - if not conn.auth(user, password, 'Ansible'): - module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server)) - # some old servers require this, also the sleep following send - conn.sendInitPresence(requestRoster=0) - - if nick: # sending to room instead of user, need to join - msg.setType('groupchat') - msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') - join = xmpp.Presence(to=module.params['to']) - join.setTag('x', namespace='http://jabber.org/protocol/muc') - conn.send(join) - time.sleep(1) - else: - msg.setType('chat') - - msg.setTo(to) - if not module.check_mode: - 
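-            # Deliver the stanza; the sleep below gives the server time to route it before we disconnect.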
-            conn.send(msg)
-            time.sleep(1)
-        conn.disconnect()
-    except Exception as e:
-        module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
-
-    module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py b/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py
deleted file mode 100644
index 59e0f325..00000000
--- a/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: logentries_msg
-short_description: Send a message to logentries.
-description:
-  - Send a message to logentries
-requirements:
-  - "python >= 2.6"
-options:
-  token:
-    type: str
-    description:
-      - Log token.
-    required: true
-  msg:
-    type: str
-    description:
-      - The message body.
-    required: true
-  api:
-    type: str
-    description:
-      - API endpoint
-    default: data.logentries.com
-  port:
-    type: int
-    description:
-      - API endpoint port
-    default: 80
-author: "Jimmy Tang (@jcftang)"
'''
-
-RETURN = '''# '''
-
-EXAMPLES = '''
-- name: Send a message to logentries
-  community.general.logentries_msg:
    token=00000000-0000-0000-0000-000000000000
    msg="{{ ansible_hostname }}"
-'''
-
-import socket
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_bytes
-
-
-def send_msg(module, token, msg, api, port):
-
-    message = "{0} {1}\n".format(token, msg)
-
-    api_ip = socket.gethostbyname(api)
-
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.connect((api_ip, port))
-    try:
-        if not module.check_mode:
-            # socket.send() expects bytes; to_bytes() keeps this working on Python 2 and 3
-            s.send(to_bytes(message))
-    except Exception as e:
-        module.fail_json(msg="failed to send message, msg=%s" % e)
-    s.close()
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            token=dict(type='str', required=True, no_log=True),
-            msg=dict(type='str', required=True),
-            api=dict(type='str', default="data.logentries.com"),
-            port=dict(type='int', default=80)),
-        supports_check_mode=True
-    )
-
-    token = module.params["token"]
-    msg = module.params["msg"]
-    api = module.params["api"]
-    port = module.params["port"]
-
-    changed = False
-    try:
-        send_msg(module, token, msg, api, port)
-        changed = True
-    except Exception as e:
-        module.fail_json(msg="unable to send msg: %s" % e)
-
-    module.exit_json(changed=changed, msg=msg)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/notification/mail.py b/ansible_collections/community/general/plugins/modules/notification/mail.py
deleted file mode 100644
index 82ca6d52..00000000
--- a/ansible_collections/community/general/plugins/modules/notification/mail.py
+++ /dev/null
@@ -1,408 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2012, Dag Wieers (@dagwieers)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-author:
-- Dag Wieers (@dagwieers)
-module: mail
-short_description: Send an email
-description:
-- This module is useful for sending emails from playbooks.
-- One may wonder why automate sending emails? In complex environments - there are from time to time processes that cannot be automated, either - because you lack the authority to make it so, or because not everyone - agrees to a common approach. -- If you cannot automate a specific step, but the step is non-blocking, - sending out an email to the responsible party to make them perform their - part of the bargain is an elegant way to put the responsibility in - someone else's lap. -- Of course sending out a mail can be equally useful as a way to notify - one or more people in a team that a specific action has been - (successfully) taken. -options: - sender: - description: - - The email-address the mail is sent from. May contain address and phrase. - type: str - default: root - aliases: [ from ] - to: - description: - - The email-address(es) the mail is being sent to. - - This is a list, which may contain address and phrase portions. - type: list - elements: str - default: root - aliases: [ recipients ] - cc: - description: - - The email-address(es) the mail is being copied to. - - This is a list, which may contain address and phrase portions. - type: list - elements: str - bcc: - description: - - The email-address(es) the mail is being 'blind' copied to. - - This is a list, which may contain address and phrase portions. - type: list - elements: str - subject: - description: - - The subject of the email being sent. - required: yes - type: str - aliases: [ msg ] - body: - description: - - The body of the email being sent. - type: str - username: - description: - - If SMTP requires username. - type: str - password: - description: - - If SMTP requires password. - type: str - host: - description: - - The mail server. - type: str - default: localhost - port: - description: - - The mail server port. - - This must be a valid integer between 1 and 65534 - type: int - default: 25 - attach: - description: - - A list of pathnames of files to attach to the message. - - Attached files will have their content-type set to C(application/octet-stream). - type: list - elements: path - default: [] - headers: - description: - - A list of headers which should be added to the message. - - Each individual header is specified as C(header=value) (see example below). - type: list - elements: str - default: [] - charset: - description: - - The character set of email being sent. - type: str - default: utf-8 - subtype: - description: - - The minor mime type, can be either C(plain) or C(html). - - The major type is always C(text). - type: str - choices: [ html, plain ] - default: plain - secure: - description: - - If C(always), the connection will only send email if the connection is Encrypted. - If the server doesn't accept the encrypted connection it will fail. - - If C(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send. - - If C(never), the connection will not attempt to setup a secure SSL/TLS session, before sending - - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending. - If it is unable to do so it will fail. - type: str - choices: [ always, never, starttls, try ] - default: try - timeout: - description: - - Sets the timeout in seconds for connection attempts. - type: int - default: 20 - ehlohost: - description: - - Allows for manual specification of host for EHLO. 
- type: str - version_added: 3.8.0 -''' - -EXAMPLES = r''' -- name: Example playbook sending mail to root - community.general.mail: - subject: System {{ ansible_hostname }} has been successfully provisioned. - delegate_to: localhost - -- name: Sending an e-mail using Gmail SMTP servers - community.general.mail: - host: smtp.gmail.com - port: 587 - username: username@gmail.com - password: mysecret - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. - delegate_to: localhost - -- name: Send e-mail to a bunch of users, attaching files - community.general.mail: - host: 127.0.0.1 - port: 2025 - subject: Ansible-report - body: Hello, this is an e-mail. I hope you like it ;-) - from: jane@example.net (Jane Jolie) - to: - - John Doe - - Suzie Something - cc: Charlie Root - attach: - - /etc/group - - /tmp/avatar2.png - headers: - - Reply-To=john@example.com - - X-Special="Something or other" - charset: us-ascii - delegate_to: localhost - -- name: Sending an e-mail using the remote machine, not the Ansible controller node - community.general.mail: - host: localhost - port: 25 - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. - -- name: Sending an e-mail using Legacy SSL to the remote machine - community.general.mail: - host: localhost - port: 25 - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. - secure: always - -- name: Sending an e-mail using StartTLS to the remote machine - community.general.mail: - host: localhost - port: 25 - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. - secure: starttls - -- name: Sending an e-mail using StartTLS, remote server, custom EHLO - community.general.mail: - host: some.smtp.host.tld - port: 25 - ehlohost: my-resolvable-hostname.tld - to: John Smith - subject: Ansible-report - body: System {{ ansible_hostname }} has been successfully provisioned. 
- secure: starttls -''' - -import os -import smtplib -import ssl -import traceback -from email import encoders -from email.utils import parseaddr, formataddr, formatdate -from email.mime.base import MIMEBase -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText -from email.header import Header - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import PY3 -from ansible.module_utils.common.text.converters import to_native - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - username=dict(type='str'), - password=dict(type='str', no_log=True), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=25), - ehlohost=dict(type='str', default=None), - sender=dict(type='str', default='root', aliases=['from']), - to=dict(type='list', elements='str', default=['root'], aliases=['recipients']), - cc=dict(type='list', elements='str', default=[]), - bcc=dict(type='list', elements='str', default=[]), - subject=dict(type='str', required=True, aliases=['msg']), - body=dict(type='str'), - attach=dict(type='list', elements='path', default=[]), - headers=dict(type='list', elements='str', default=[]), - charset=dict(type='str', default='utf-8'), - subtype=dict(type='str', default='plain', choices=['html', 'plain']), - secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']), - timeout=dict(type='int', default=20), - ), - required_together=[['password', 'username']], - ) - - username = module.params.get('username') - password = module.params.get('password') - host = module.params.get('host') - port = module.params.get('port') - local_hostname = module.params.get('ehlohost') - sender = module.params.get('sender') - recipients = module.params.get('to') - copies = module.params.get('cc') - blindcopies = module.params.get('bcc') - subject = module.params.get('subject') - body = module.params.get('body') - attach_files = module.params.get('attach') - headers = module.params.get('headers') - charset = module.params.get('charset') - subtype = module.params.get('subtype') - secure = module.params.get('secure') - timeout = module.params.get('timeout') - - code = 0 - secure_state = False - sender_phrase, sender_addr = parseaddr(sender) - - if not body: - body = subject - - try: - if secure != 'never': - try: - if PY3: - smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout) - else: - smtp = smtplib.SMTP_SSL(local_hostname=local_hostname, timeout=timeout) - code, smtpmessage = smtp.connect(host, port) - secure_state = True - except ssl.SSLError as e: - if secure == 'always': - module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' % - (host, port, to_native(e)), exception=traceback.format_exc()) - except Exception: - pass - - if not secure_state: - if PY3: - smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout) - else: - smtp = smtplib.SMTP(local_hostname=local_hostname, timeout=timeout) - code, smtpmessage = smtp.connect(host, port) - - except smtplib.SMTPException as e: - module.fail_json(rc=1, msg='Unable to Connect %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) - - try: - smtp.ehlo() - except smtplib.SMTPException as e: - module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) - - if int(code) > 0: - if not secure_state and secure in ('starttls', 'try'): - if smtp.has_extn('STARTTLS'): - try: - 
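-                    # Upgrade the plaintext session to TLS (SMTP STARTTLS, RFC 3207) before any credentials are sent.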
-                    smtp.starttls()
-                    secure_state = True
-                except smtplib.SMTPException as e:
-                    module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
-                                     (host, port, to_native(e)), exception=traceback.format_exc())
-                try:
-                    smtp.ehlo()
-                except smtplib.SMTPException as e:
-                    module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
-            else:
-                if secure == 'starttls':
-                    module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))
-
-        if username and password:
-            if smtp.has_extn('AUTH'):
-                try:
-                    smtp.login(username, password)
-                except smtplib.SMTPAuthenticationError:
-                    module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
-                except smtplib.SMTPException:
-                    module.fail_json(rc=1, msg='No suitable authentication method was found on %s:%s' % (host, port))
-            else:
-                module.fail_json(rc=1, msg="No Authentication on the server at %s:%s" % (host, port))
-
-    if not secure_state and (username and password):
-        module.warn('Username and Password were sent without encryption')
-
-    msg = MIMEMultipart(_charset=charset)
-    msg['From'] = formataddr((sender_phrase, sender_addr))
-    msg['Date'] = formatdate(localtime=True)
-    msg['Subject'] = Header(subject, charset)
-    msg.preamble = "Multipart message"
-
-    for header in headers:
-        # NOTE: Backward compatible with old syntax using '|' as delimiter
-        for hdr in [x.strip() for x in header.split('|')]:
-            try:
-                h_key, h_val = hdr.split('=')
-                h_val = to_native(Header(h_val, charset))
-                msg.add_header(h_key, h_val)
-            except Exception:
-                module.warn("Skipping header '%s', unable to parse" % hdr)
-
-    if 'X-Mailer' not in msg:
-        msg.add_header('X-Mailer', 'Ansible mail module')
-
-    addr_list = []
-    for addr in [x.strip() for x in blindcopies]:
-        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
-
-    to_list = []
-    for addr in [x.strip() for x in recipients]:
-        to_list.append(formataddr(parseaddr(addr)))
-        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
-    msg['To'] = ", ".join(to_list)
-
-    cc_list = []
-    for addr in [x.strip() for x in copies]:
-        cc_list.append(formataddr(parseaddr(addr)))
-        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
-    msg['Cc'] = ", ".join(cc_list)
-
-    part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
-    msg.attach(part)
-
-    # NOTE: Backward compatibility with old syntax using space as delimiter is not retained
-    #       This breaks files with spaces in it :-(
-    for filename in attach_files:
-        try:
-            part = MIMEBase('application', 'octet-stream')
-            with open(filename, 'rb') as fp:
-                part.set_payload(fp.read())
-            encoders.encode_base64(part)
-            part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename))
-            msg.attach(part)
-        except Exception as e:
-            module.fail_json(rc=1, msg="Failed to send community.general.mail: can't attach file %s: %s" %
-                             (filename, to_native(e)), exception=traceback.format_exc())
-
-    composed = msg.as_string()
-
-    try:
-        result = smtp.sendmail(sender_addr, set(addr_list), composed)
-    except Exception as e:
-        module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" %
-                         (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc())
-
-    smtp.quit()
-
-    if result:
-        for key in result:
-            module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1]))
-        module.exit_json(msg='Failed to send mail to at least one recipient', result=result)
-
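-    # An empty dict from sendmail() means every recipient was accepted.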
module.exit_json(msg='Mail sent successfully', result=result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/matrix.py b/ansible_collections/community/general/plugins/modules/notification/matrix.py deleted file mode 100644 index d94ed2b8..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/matrix.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -# (c) 2018, Jan Christian Grünhage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -author: "Jan Christian Grünhage (@jcgruenhage)" -module: matrix -short_description: Send notifications to matrix -description: - - This module sends html formatted notifications to matrix rooms. -options: - msg_plain: - type: str - description: - - Plain text form of the message to send to matrix, usually markdown - required: true - msg_html: - type: str - description: - - HTML form of the message to send to matrix - required: true - room_id: - type: str - description: - - ID of the room to send the notification to - required: true - hs_url: - type: str - description: - - URL of the homeserver, where the CS-API is reachable - required: true - token: - type: str - description: - - Authentication token for the API call. If provided, user_id and password are not required - user_id: - type: str - description: - - The user id of the user - password: - type: str - description: - - The password to log in with -requirements: - - matrix-client (Python library) -''' - -EXAMPLES = ''' -- name: Send matrix notification with token - community.general.matrix: - msg_plain: "**hello world**" - msg_html: "hello world" - room_id: "!12345678:server.tld" - hs_url: "https://matrix.org" - token: "{{ matrix_auth_token }}" - -- name: Send matrix notification with user_id and password - community.general.matrix: - msg_plain: "**hello world**" - msg_html: "hello world" - room_id: "!12345678:server.tld" - hs_url: "https://matrix.org" - user_id: "ansible_notification_bot" - password: "{{ matrix_auth_password }}" -''' - -RETURN = ''' -''' -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -MATRIX_IMP_ERR = None -try: - from matrix_client.client import MatrixClient -except ImportError: - MATRIX_IMP_ERR = traceback.format_exc() - matrix_found = False -else: - matrix_found = True - - -def run_module(): - module_args = dict( - msg_plain=dict(type='str', required=True), - msg_html=dict(type='str', required=True), - room_id=dict(type='str', required=True), - hs_url=dict(type='str', required=True), - token=dict(type='str', required=False, no_log=True), - user_id=dict(type='str', required=False), - password=dict(type='str', required=False, no_log=True), - ) - - result = dict( - changed=False, - message='' - ) - - module = AnsibleModule( - argument_spec=module_args, - mutually_exclusive=[['password', 'token']], - required_one_of=[['password', 'token']], - required_together=[['user_id', 'password']], - supports_check_mode=True - ) - - if not matrix_found: - module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR) - - if module.check_mode: - return result - - # create a client object - client = MatrixClient(module.params['hs_url']) - if module.params['token'] is not None: - client.api.token = module.params['token'] - else: - 
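-        # No token given: fall back to user_id/password login (required_together ensures both are present).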
client.login(module.params['user_id'], module.params['password'], sync=False) - - # make sure we are in a given room and return a room object for it - room = client.join_room(module.params['room_id']) - # send an html formatted messages - room.send_html(module.params['msg_html'], module.params['msg_plain']) - - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/mattermost.py b/ansible_collections/community/general/plugins/modules/notification/mattermost.py deleted file mode 100644 index efee4c33..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/mattermost.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Benjamin Jolivot -# Inspired by slack module : -# # (c) 2017, Steve Pletcher -# # (c) 2016, René Moser -# # (c) 2015, Stefan Berggren -# # (c) 2014, Ramon de la Fuente ) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: mattermost -short_description: Send Mattermost notifications -description: - - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration. -author: "Benjamin Jolivot (@bjolivot)" -options: - url: - type: str - description: - - Mattermost url (i.e. http://mattermost.yourcompany.com). - required: true - api_key: - type: str - description: - - Mattermost webhook api key. Log into your mattermost site, go to - Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook. - This will give you full URL. api_key is the last part. - http://mattermost.example.com/hooks/C(API_KEY) - required: true - text: - type: str - description: - - Text to send. Note that the module does not handle escaping characters. - - Required when I(attachments) is not set. - attachments: - type: list - elements: dict - description: - - Define a list of attachments. - - For more information, see U(https://developers.mattermost.com/integrate/admin-guide/admin-message-attachments/). - - Required when I(text) is not set. - version_added: 4.3.0 - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key). - username: - type: str - description: - - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc. - default: Ansible - icon_url: - type: str - description: - - Url for the message sender's icon. - default: https://www.ansible.com/favicon.ico - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- default: yes - type: bool -''' - -EXAMPLES = """ -- name: Send notification message via Mattermost - community.general.mattermost: - url: http://mattermost.example.com - api_key: my_api_key - text: '{{ inventory_hostname }} completed' - -- name: Send notification message via Mattermost all options - community.general.mattermost: - url: http://mattermost.example.com - api_key: my_api_key - text: '{{ inventory_hostname }} completed' - channel: notifications - username: 'Ansible on {{ inventory_hostname }}' - icon_url: http://www.example.com/some-image-file.png - -- name: Send attachments message via Mattermost - community.general.mattermost: - url: http://mattermost.example.com - api_key: my_api_key - attachments: - - text: Display my system load on host A and B - color: '#ff00dd' - title: System load - fields: - - title: System A - value: "load average: 0,74, 0,66, 0,63" - short: True - - title: System B - value: 'load average: 5,16, 4,64, 2,43' - short: True -""" - -RETURN = ''' -payload: - description: Mattermost payload - returned: success - type: str -webhook_url: - description: URL the webhook is sent to - returned: success - type: str -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - argument_spec=dict( - url=dict(type='str', required=True), - api_key=dict(type='str', required=True, no_log=True), - text=dict(type='str'), - channel=dict(type='str', default=None), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), - validate_certs=dict(default=True, type='bool'), - attachments=dict(type='list', elements='dict'), - ), - required_one_of=[ - ('text', 'attachments'), - ], - ) - # init return dict - result = dict(changed=False, msg="OK") - - # define webhook - webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key']) - result['webhook_url'] = webhook_url - - # define payload - payload = {} - for param in ['text', 'channel', 'username', 'icon_url', 'attachments']: - if module.params[param] is not None: - payload[param] = module.params[param] - - payload = module.jsonify(payload) - result['payload'] = payload - - # http headers - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json', - } - - # notes: - # Nothing is done in check mode - # it'll pass even if your server is down or/and if your token is invalid. - # If someone find good way to check... 
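# NOTE (editor's sketch, values hypothetical): for a task that sets text
# and channel, the JSON body POSTed to <url>/hooks/<api_key> below looks
# like
#
#     {"text": "host01 completed",
#      "channel": "notifications",
#      "username": "Ansible",
#      "icon_url": "https://www.ansible.com/favicon.ico"}
#
# and, as noted above, the module only verifies that the server answers
# with HTTP 200.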
- - # send request if not in test mode - if module.check_mode is False: - response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload) - - # something's wrong - if info['status'] != 200: - # some problem - result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg']) - module.fail_json(**result) - - # Looks good - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/mqtt.py b/ansible_collections/community/general/plugins/modules/notification/mqtt.py deleted file mode 100644 index 60991961..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/mqtt.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, 2014, Jan-Piet Mens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: mqtt -short_description: Publish a message on an MQTT topic for the IoT -description: - - Publish a message on an MQTT topic. -options: - server: - type: str - description: - - MQTT broker address/name - default: localhost - port: - type: int - description: - - MQTT broker port number - default: 1883 - username: - type: str - description: - - Username to authenticate against the broker. - password: - type: str - description: - - Password for C(username) to authenticate against the broker. - client_id: - type: str - description: - - MQTT client identifier - - If not specified, a value C(hostname + pid) will be used. - topic: - type: str - description: - - MQTT topic name - required: true - payload: - type: str - description: - - Payload. The special string C("None") may be used to send a NULL - (i.e. empty) payload which is useful to simply notify with the I(topic) - or to clear previously retained messages. - required: true - qos: - type: str - description: - - QoS (Quality of Service) - default: "0" - choices: [ "0", "1", "2" ] - retain: - description: - - Setting this flag causes the broker to retain (i.e. keep) the message so that - applications that subsequently subscribe to the topic can received the last - retained message immediately. - type: bool - default: 'no' - ca_cert: - type: path - description: - - The path to the Certificate Authority certificate files that are to be - treated as trusted by this client. If this is the only option given - then the client will operate in a similar manner to a web browser. That - is to say it will require the broker to have a certificate signed by the - Certificate Authorities in ca_certs and will communicate using TLS v1, - but will not attempt any form of authentication. This provides basic - network encryption but may not be sufficient depending on how the broker - is configured. - aliases: [ ca_certs ] - client_cert: - type: path - description: - - The path pointing to the PEM encoded client certificate. If this is not - None it will be used as client information for TLS based - authentication. Support for this feature is broker dependent. - aliases: [ certfile ] - client_key: - type: path - description: - - The path pointing to the PEM encoded client private key. If this is not - None it will be used as client information for TLS based - authentication. Support for this feature is broker dependent. 
- aliases: [ keyfile ] - tls_version: - description: - - Specifies the version of the SSL/TLS protocol to be used. - - By default (if the python version supports it) the highest TLS version is - detected. If unavailable, TLS v1 is used. - type: str - choices: - - tlsv1.1 - - tlsv1.2 -requirements: [ mosquitto ] -notes: - - This module requires a connection to an MQTT broker such as Mosquitto - U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)). -author: "Jan-Piet Mens (@jpmens)" -''' - -EXAMPLES = ''' -- name: Publish a message on an MQTT topic - community.general.mqtt: - topic: 'service/ansible/{{ ansible_hostname }}' - payload: 'Hello at {{ ansible_date_time.iso8601 }}' - qos: 0 - retain: False - client_id: ans001 - delegate_to: localhost -''' - -# =========================================== -# MQTT module support methods. -# - -import os -import ssl -import traceback -import platform - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -HAS_PAHOMQTT = True -PAHOMQTT_IMP_ERR = None -try: - import socket - import paho.mqtt.publish as mqtt -except ImportError: - PAHOMQTT_IMP_ERR = traceback.format_exc() - HAS_PAHOMQTT = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -# =========================================== -# Main -# - -def main(): - tls_map = {} - - try: - tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2 - except AttributeError: - pass - - try: - tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1 - except AttributeError: - pass - - module = AnsibleModule( - argument_spec=dict( - server=dict(default='localhost'), - port=dict(default=1883, type='int'), - topic=dict(required=True), - payload=dict(required=True), - client_id=dict(default=None), - qos=dict(default="0", choices=["0", "1", "2"]), - retain=dict(default=False, type='bool'), - username=dict(default=None), - password=dict(default=None, no_log=True), - ca_cert=dict(default=None, type='path', aliases=['ca_certs']), - client_cert=dict(default=None, type='path', aliases=['certfile']), - client_key=dict(default=None, type='path', aliases=['keyfile']), - tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2']) - ), - supports_check_mode=True - ) - - if not HAS_PAHOMQTT: - module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR) - - server = module.params.get("server", 'localhost') - port = module.params.get("port", 1883) - topic = module.params.get("topic") - payload = module.params.get("payload") - client_id = module.params.get("client_id", '') - qos = int(module.params.get("qos", 0)) - retain = module.params.get("retain") - username = module.params.get("username", None) - password = module.params.get("password", None) - ca_certs = module.params.get("ca_cert", None) - certfile = module.params.get("client_cert", None) - keyfile = module.params.get("client_key", None) - tls_version = module.params.get("tls_version", None) - - if client_id is None: - client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) - - if payload and payload == 'None': - payload = None - - auth = None - if username is not None: - auth = {'username': username, 'password': password} - - tls = None - if ca_certs is not None: - if tls_version: - tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23) - else: - if LooseVersion(platform.python_version()) <= LooseVersion("3.5.2"): - # Specifying `None` on later versions of python seems sufficient to 
- # instruct python to autonegotiate the SSL/TLS connection. On versions - # 3.5.2 and lower though we need to specify the version. - # - # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was - # not available until 3.5.3. - tls_version = ssl.PROTOCOL_SSLv23 - - tls = { - 'ca_certs': ca_certs, - 'certfile': certfile, - 'keyfile': keyfile, - 'tls_version': tls_version, - } - - try: - mqtt.single( - topic, - payload, - qos=qos, - retain=retain, - client_id=client_id, - hostname=server, - port=port, - auth=auth, - tls=tls - ) - except Exception as e: - module.fail_json( - msg="unable to publish to MQTT broker %s" % to_native(e), - exception=traceback.format_exc() - ) - - module.exit_json(changed=False, topic=topic) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/nexmo.py b/ansible_collections/community/general/plugins/modules/notification/nexmo.py deleted file mode 100644 index d239bb44..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/nexmo.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Martz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: nexmo -short_description: Send a SMS via nexmo -description: - - Send a SMS message via nexmo -author: "Matt Martz (@sivel)" -options: - api_key: - type: str - description: - - Nexmo API Key - required: true - api_secret: - type: str - description: - - Nexmo API Secret - required: true - src: - type: int - description: - - Nexmo Number to send from - required: true - dest: - type: list - elements: int - description: - - Phone number(s) to send SMS message to - required: true - msg: - type: str - description: - - Message to text to send. Messages longer than 160 characters will be - split into multiple messages - required: true - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- type: bool - default: 'yes' -extends_documentation_fragment: - - url -''' - -EXAMPLES = """ -- name: Send notification message via Nexmo - community.general.nexmo: - api_key: 640c8a53 - api_secret: 0ce239a6 - src: 12345678901 - dest: - - 10987654321 - - 16789012345 - msg: '{{ inventory_hostname }} completed' - delegate_to: localhost -""" -import json - -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url, url_argument_spec - - -NEXMO_API = 'https://rest.nexmo.com/sms/json' - - -def send_msg(module): - failed = list() - responses = dict() - msg = { - 'api_key': module.params.get('api_key'), - 'api_secret': module.params.get('api_secret'), - 'from': module.params.get('src'), - 'text': module.params.get('msg') - } - for number in module.params.get('dest'): - msg['to'] = number - url = "%s?%s" % (NEXMO_API, urlencode(msg)) - - headers = dict(Accept='application/json') - response, info = fetch_url(module, url, headers=headers) - if info['status'] != 200: - failed.append(number) - responses[number] = dict(failed=True) - - try: - responses[number] = json.load(response) - except Exception: - failed.append(number) - responses[number] = dict(failed=True) - else: - for message in responses[number]['messages']: - if int(message['status']) != 0: - failed.append(number) - responses[number] = dict(failed=True, **responses[number]) - - if failed: - msg = 'One or messages failed to send' - else: - msg = '' - - module.exit_json(failed=bool(failed), msg=msg, changed=False, - responses=responses) - - -def main(): - argument_spec = url_argument_spec() - argument_spec.update( - dict( - api_key=dict(required=True, no_log=True), - api_secret=dict(required=True, no_log=True), - src=dict(required=True, type='int'), - dest=dict(required=True, type='list', elements='int'), - msg=dict(required=True), - ), - ) - - module = AnsibleModule( - argument_spec=argument_spec - ) - - send_msg(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py b/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py deleted file mode 100644 index 04d5e385..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Marc Sensenich -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' -module: office_365_connector_card -short_description: Use webhooks to create Connector Card messages within an Office 365 group -description: - - Creates Connector Card messages through - - Office 365 Connectors U(https://dev.outlook.com/Connectors) -author: "Marc Sensenich (@marc-sensenich)" -notes: - - This module is not idempotent, therefore if the same task is run twice - there will be two Connector Cards created -options: - webhook: - type: str - description: - - The webhook URL is given to you when you create a new Connector. - required: true - summary: - type: str - description: - - A string used for summarizing card content. - - This will be shown as the message subject. - - This is required if the text parameter isn't populated. 
- color: - type: str - description: - - Accent color used for branding or indicating status in the card. - title: - type: str - description: - - A title for the Connector message. Shown at the top of the message. - text: - type: str - description: - - The main text of the card. - - This will be rendered below the sender information and optional title, - - and above any sections or actions present. - actions: - type: list - elements: dict - description: - - This array of objects will power the action links - - found at the bottom of the card. - sections: - type: list - elements: dict - description: - - Contains a list of sections to display in the card. - - For more information see https://dev.outlook.com/Connectors/reference. -''' - -EXAMPLES = """ -- name: Create a simple Connector Card - community.general.office_365_connector_card: - webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID - text: 'Hello, World!' - -- name: Create a Connector Card with the full format - community.general.office_365_connector_card: - webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID - summary: This is the summary property - title: This is the **card's title** property - text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur - adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - color: E81123 - sections: - - title: This is the **section's title** property - activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg - activity_title: This is the section's **activityTitle** property - activity_subtitle: This is the section's **activitySubtitle** property - activity_text: This is the section's **activityText** property. - hero_image: - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg - title: This is the image's alternate text - text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur - adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - facts: - - name: This is a fact name - value: This is a fact value - - name: This is a fact name - value: This is a fact value - - name: This is a fact name - value: This is a fact value - images: - - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg - title: This is the image's alternate text - - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg - title: This is the image's alternate text - - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg - title: This is the image's alternate text - actions: - - "@type": ActionCard - name: Comment - inputs: - - "@type": TextInput - id: comment - is_multiline: true - title: Input's title property - actions: - - "@type": HttpPOST - name: Save - target: http://... - - "@type": ActionCard - name: Due Date - inputs: - - "@type": DateInput - id: dueDate - title: Input's title property - actions: - - "@type": HttpPOST - name: Save - target: http://... - - "@type": HttpPOST - name: Action's name prop. - target: http://... - - "@type": OpenUri - name: Action's name prop - targets: - - os: default - uri: http://... - - start_group: true - title: This is the title of a **second section** - text: This second section is visually separated from the first one by setting its - **startGroup** property to true. 
-""" - -RETURN = """ -""" - -# import module snippets -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict - -OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions" -OFFICE_365_CARD_TYPE = "MessageCard" -OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required." -OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable." - - -def build_actions(actions): - action_items = [] - - for action in actions: - action_item = snake_dict_to_camel_dict(action) - action_items.append(action_item) - - return action_items - - -def build_sections(sections): - sections_created = [] - - for section in sections: - sections_created.append(build_section(section)) - - return sections_created - - -def build_section(section): - section_payload = dict() - - if 'title' in section: - section_payload['title'] = section['title'] - - if 'start_group' in section: - section_payload['startGroup'] = section['start_group'] - - if 'activity_image' in section: - section_payload['activityImage'] = section['activity_image'] - - if 'activity_title' in section: - section_payload['activityTitle'] = section['activity_title'] - - if 'activity_subtitle' in section: - section_payload['activitySubtitle'] = section['activity_subtitle'] - - if 'activity_text' in section: - section_payload['activityText'] = section['activity_text'] - - if 'hero_image' in section: - section_payload['heroImage'] = section['hero_image'] - - if 'text' in section: - section_payload['text'] = section['text'] - - if 'facts' in section: - section_payload['facts'] = section['facts'] - - if 'images' in section: - section_payload['images'] = section['images'] - - if 'actions' in section: - section_payload['potentialAction'] = build_actions(section['actions']) - - return section_payload - - -def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None): - payload = dict() - payload['@context'] = OFFICE_365_CARD_CONTEXT - payload['@type'] = OFFICE_365_CARD_TYPE - - if summary is not None: - payload['summary'] = summary - - if color is not None: - payload['themeColor'] = color - - if title is not None: - payload['title'] = title - - if text is not None: - payload['text'] = text - - if actions: - payload['potentialAction'] = build_actions(actions) - - if sections: - payload['sections'] = build_sections(sections) - - payload = module.jsonify(payload) - return payload - - -def do_notify_connector_card_webhook(module, webhook, payload): - headers = { - 'Content-Type': 'application/json' - } - - response, info = fetch_url( - module=module, - url=webhook, - headers=headers, - method='POST', - data=payload - ) - - if info['status'] == 200: - module.exit_json(changed=True) - elif info['status'] == 400 and module.check_mode: - if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG: - module.exit_json(changed=True) - else: - module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG) - else: - module.fail_json( - msg="failed to send %s as a connector card to Incoming Webhook: %s" - % (payload, info['msg']) - ) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - webhook=dict(required=True, no_log=True), - summary=dict(type='str'), - color=dict(type='str'), - title=dict(type='str'), - text=dict(type='str'), - actions=dict(type='list', elements='dict'), - sections=dict(type='list', elements='dict') - ), - supports_check_mode=True - ) - - webhook = 
module.params['webhook'] - summary = module.params['summary'] - color = module.params['color'] - title = module.params['title'] - text = module.params['text'] - actions = module.params['actions'] - sections = module.params['sections'] - - payload = build_payload_for_connector_card( - module, - summary, - color, - title, - text, - actions, - sections) - - if module.check_mode: - # In check mode, send an empty payload to validate connection - check_mode_payload = build_payload_for_connector_card(module) - do_notify_connector_card_webhook(module, webhook, check_mode_payload) - - do_notify_connector_card_webhook(module, webhook, payload) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/pushbullet.py b/ansible_collections/community/general/plugins/modules/notification/pushbullet.py deleted file mode 100644 index 435fcf2f..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/pushbullet.py +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -author: "Willy Barro (@willybarro)" -requirements: [ pushbullet.py ] -module: pushbullet -short_description: Sends notifications to Pushbullet -description: - - This module sends push notifications via Pushbullet to channels or devices. -options: - api_key: - type: str - description: - - Push bullet API token - required: true - channel: - type: str - description: - - The channel TAG you wish to broadcast a push notification, - as seen on the "My Channels" > "Edit your channel" at - Pushbullet page. - device: - type: str - description: - - The device NAME you wish to send a push notification, - as seen on the Pushbullet main page. - push_type: - type: str - description: - - Thing you wish to push. - default: note - choices: [ "note", "link" ] - title: - type: str - description: - - Title of the notification. - required: true - body: - type: str - description: - - Body of the notification, e.g. Details of the fault you're alerting. - url: - type: str - description: - - URL field, used when I(push_type) is C(link). - -notes: - - Requires pushbullet.py Python package on the remote host. - You can install it via pip with ($ pip install pushbullet.py). - See U(https://github.com/randomchars/pushbullet.py) -''' - -EXAMPLES = ''' -- name: Sends a push notification to a device - community.general.pushbullet: - api_key: "ABC123abc123ABC123abc123ABC123ab" - device: "Chrome" - title: "You may see this on Google Chrome" - -- name: Sends a link to a device - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - device: Chrome - push_type: link - title: Ansible Documentation - body: https://docs.ansible.com/ - -- name: Sends a push notification to a channel - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - channel: my-awesome-channel - title: Broadcasting a message to the #my-awesome-channel folks - -- name: Sends a push notification with title and body to a channel - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - channel: my-awesome-channel - title: ALERT! 
Signup service is down - body: Error rate on signup service is over 90% for more than 2 minutes -''' - -import traceback - -PUSHBULLET_IMP_ERR = None -try: - from pushbullet import PushBullet - from pushbullet.errors import InvalidKeyError, PushError -except ImportError: - PUSHBULLET_IMP_ERR = traceback.format_exc() - pushbullet_found = False -else: - pushbullet_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -# =========================================== -# Main -# - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(type='str', required=True, no_log=True), - channel=dict(type='str', default=None), - device=dict(type='str', default=None), - push_type=dict(type='str', default="note", choices=['note', 'link']), - title=dict(type='str', required=True), - body=dict(type='str', default=None), - url=dict(type='str', default=None), - ), - mutually_exclusive=( - ['channel', 'device'], - ), - supports_check_mode=True - ) - - api_key = module.params['api_key'] - channel = module.params['channel'] - device = module.params['device'] - push_type = module.params['push_type'] - title = module.params['title'] - body = module.params['body'] - url = module.params['url'] - - if not pushbullet_found: - module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR) - - # Init pushbullet - try: - pb = PushBullet(api_key) - target = None - except InvalidKeyError: - module.fail_json(msg="Invalid api_key") - - # Checks for channel/device - if device is None and channel is None: - module.fail_json(msg="You need to provide a channel or a device.") - - # Search for given device - if device is not None: - devices_by_nickname = {} - for d in pb.devices: - devices_by_nickname[d.nickname] = d - - if device in devices_by_nickname: - target = devices_by_nickname[device] - else: - module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys()))) - - # Search for given channel - if channel is not None: - channels_by_tag = {} - for c in pb.channels: - channels_by_tag[c.channel_tag] = c - - if channel in channels_by_tag: - target = channels_by_tag[channel] - else: - module.fail_json(msg="Channel '%s' not found. 
Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys()))) - - # If in check mode, exit saying that we succeeded - if module.check_mode: - module.exit_json(changed=False, msg="OK") - - # Send push notification - try: - if push_type == "link": - target.push_link(title, url, body) - else: - target.push_note(title, body) - module.exit_json(changed=False, msg="OK") - except PushError as e: - module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e)) - - module.fail_json(msg="An unknown error has occurred") - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/pushover.py b/ansible_collections/community/general/plugins/modules/notification/pushover.py deleted file mode 100644 index 7f73592a..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/pushover.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2012, Jim Richardson -# Copyright (c) 2019, Bernd Arnold -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pushover -short_description: Send notifications via U(https://pushover.net) -description: - - Send notifications via pushover, to subscriber list of devices, and email - addresses. Requires pushover app on devices. -notes: - - You will require a pushover.net account to use this module. But no account - is required to receive messages. -options: - msg: - type: str - description: - - What message you wish to send. - required: true - app_token: - type: str - description: - - Pushover issued token identifying your pushover app. - required: true - user_key: - type: str - description: - - Pushover issued authentication key for your user. - required: true - title: - type: str - description: - - Message title. - required: false - pri: - type: str - description: - - Message priority (see U(https://pushover.net) for details). - required: false - default: '0' - choices: [ '-2', '-1', '0', '1', '2' ] - device: - type: str - description: - - A device the message should be sent to. Multiple devices can be specified, separated by a comma. - required: false - version_added: 1.2.0 - -author: - - "Jim Richardson (@weaselkeeper)" - - "Bernd Arnold (@wopfel)" -''' - -EXAMPLES = ''' -- name: Send notifications via pushover.net - community.general.pushover: - msg: '{{ inventory_hostname }} is acting strange ...' - app_token: wxfdksl - user_key: baa5fe97f2c5ab3ca8f0bb59 - delegate_to: localhost - -- name: Send notifications via pushover.net - community.general.pushover: - title: 'Alert!' 
- msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic' - pri: 1 - app_token: wxfdksl - user_key: baa5fe97f2c5ab3ca8f0bb59 - delegate_to: localhost - -- name: Send notifications via pushover.net to a specific device - community.general.pushover: - msg: '{{ inventory_hostname }} has been lost somewhere' - app_token: wxfdksl - user_key: baa5fe97f2c5ab3ca8f0bb59 - device: admins-iPhone - delegate_to: localhost -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -class Pushover(object): - ''' Instantiates a pushover object, use it to send notifications ''' - base_uri = 'https://api.pushover.net' - - def __init__(self, module, user, token): - self.module = module - self.user = user - self.token = token - - def run(self, priority, msg, title, device): - ''' Do, whatever it is, we do. ''' - - url = '%s/1/messages.json' % (self.base_uri) - - # parse config - options = dict(user=self.user, - token=self.token, - priority=priority, - message=msg) - - if title is not None: - options = dict(options, - title=title) - - if device is not None: - options = dict(options, - device=device) - - data = urlencode(options) - - headers = {"Content-type": "application/x-www-form-urlencoded"} - r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers) - if info['status'] != 200: - raise Exception(info) - - return r.read() - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - title=dict(type='str'), - msg=dict(required=True), - app_token=dict(required=True, no_log=True), - user_key=dict(required=True, no_log=True), - pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']), - device=dict(type='str'), - ), - ) - - msg_object = Pushover(module, module.params['user_key'], module.params['app_token']) - try: - response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device']) - except Exception: - module.fail_json(msg='Unable to send msg via pushover') - - module.exit_json(msg='message sent successfully: %s' % response, changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/rocketchat.py b/ansible_collections/community/general/plugins/modules/notification/rocketchat.py deleted file mode 100644 index 500560e4..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/rocketchat.py +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Deepak Kothandan -# (c) 2015, Stefan Berggren -# (c) 2014, Ramon de la Fuente -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rocketchat -short_description: Send notifications to Rocket Chat -description: - - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration -author: "Ramon de la Fuente (@ramondelafuente)" -options: - domain: - type: str - description: - - The domain for your environment without protocol. (i.e. - C(example.com) or C(chat.example.com)) - required: true - token: - type: str - description: - - Rocket Chat Incoming Webhook integration token. This provides - authentication to Rocket Chat's Incoming webhook for posting - messages. 
- required: true - protocol: - type: str - description: - - Specify the protocol used to send notification messages before the webhook url. (i.e. http or https) - default: https - choices: - - 'http' - - 'https' - msg: - type: str - description: - - Message to be sent. - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(token) - specified during the creation of webhook. - username: - type: str - description: - - This is the sender of the message. - default: "Ansible" - icon_url: - type: str - description: - - URL for the message sender's icon. - default: "https://www.ansible.com/favicon.ico" - icon_emoji: - type: str - description: - - Emoji for the message sender. The representation for the available emojis can be - got from Rocket Chat. (for example :thumbsup:) (if I(icon_emoji) is set, I(icon_url) will not be used) - link_names: - type: int - description: - - Automatically create links for channels and usernames in I(msg). - default: 1 - choices: - - 1 - - 0 - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - color: - type: str - description: - - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message - default: 'normal' - choices: - - 'normal' - - 'good' - - 'warning' - - 'danger' - attachments: - type: list - elements: dict - description: - - Define a list of attachments. -''' - -EXAMPLES = """ -- name: Send notification message via Rocket Chat - community.general.rocketchat: - token: thetoken/generatedby/rocketchat - domain: chat.example.com - msg: '{{ inventory_hostname }} completed' - delegate_to: localhost - -- name: Send notification message via Rocket Chat all options - community.general.rocketchat: - domain: chat.example.com - token: thetoken/generatedby/rocketchat - msg: '{{ inventory_hostname }} completed' - channel: #ansible - username: 'Ansible on {{ inventory_hostname }}' - icon_url: http://www.example.com/some-image-file.png - link_names: 0 - delegate_to: localhost - -- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat - community.general.rocketchat: - token: thetoken/generatedby/rocketchat - domain: chat.example.com - msg: '{{ inventory_hostname }} is alive!' - color: good - username: '' - icon_url: '' - delegate_to: localhost - -- name: Use the attachments API - community.general.rocketchat: - token: thetoken/generatedby/rocketchat - domain: chat.example.com - attachments: - - text: Display my system load on host A and B - color: #ff00dd - title: System load - fields: - - title: System A - value: 'load average: 0,74, 0,66, 0,63' - short: True - - title: System B - value: 'load average: 5,16, 4,64, 2,43' - short: True - delegate_to: localhost -""" - -RETURN = """ -changed: - description: A flag indicating if any change was made or not. 
- returned: success - type: bool - sample: false -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' - - -def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments): - payload = {} - if color == "normal" and text is not None: - payload = dict(text=text) - elif text is not None: - payload = dict(attachments=[dict(text=text, color=color)]) - if channel is not None: - if (channel[0] == '#') or (channel[0] == '@'): - payload['channel'] = channel - else: - payload['channel'] = '#' + channel - if username is not None: - payload['username'] = username - if icon_emoji is not None: - payload['icon_emoji'] = icon_emoji - else: - payload['icon_url'] = icon_url - if link_names is not None: - payload['link_names'] = link_names - - if attachments is not None: - if 'attachments' not in payload: - payload['attachments'] = [] - - if attachments is not None: - for attachment in attachments: - if 'fallback' not in attachment: - attachment['fallback'] = attachment['text'] - payload['attachments'].append(attachment) - - payload = "payload=" + module.jsonify(payload) - return payload - - -def do_notify_rocketchat(module, domain, token, protocol, payload): - - if token.count('/') < 1: - module.fail_json(msg="Invalid Token specified, provide a valid token") - - rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token) - - response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload) - if info['status'] != 200: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - domain=dict(type='str', required=True), - token=dict(type='str', required=True, no_log=True), - protocol=dict(type='str', default='https', choices=['http', 'https']), - msg=dict(type='str', required=False), - channel=dict(type='str'), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), - icon_emoji=dict(type='str'), - link_names=dict(type='int', default=1, choices=[0, 1]), - validate_certs=dict(default=True, type='bool'), - color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), - attachments=dict(type='list', elements='dict', required=False) - ) - ) - - domain = module.params['domain'] - token = module.params['token'] - protocol = module.params['protocol'] - text = module.params['msg'] - channel = module.params['channel'] - username = module.params['username'] - icon_url = module.params['icon_url'] - icon_emoji = module.params['icon_emoji'] - link_names = module.params['link_names'] - color = module.params['color'] - attachments = module.params['attachments'] - - payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments) - do_notify_rocketchat(module, domain, token, protocol, payload) - - module.exit_json(msg="OK") - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/say.py b/ansible_collections/community/general/plugins/modules/notification/say.py deleted file mode 100644 index 1c66adf6..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/say.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Michael DeHaan -# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: say -short_description: Makes a computer to speak. -description: - - makes a computer speak! Amuse your friends, annoy your coworkers! -notes: - - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say). - - If you like this module, you may also be interested in the osx_say callback plugin. - - A list of available voices, with language, can be found by running C(say -v ?) on a OSX host and C(espeak --voices) on a Linux host. -options: - msg: - type: str - description: - What to say - required: true - voice: - type: str - description: - What voice to use - required: false -requirements: [ say or espeak or espeak-ng ] -author: - - "Ansible Core Team" - - "Michael DeHaan (@mpdehaan)" -''' - -EXAMPLES = ''' -- name: Makes a computer to speak - community.general.say: - msg: '{{ inventory_hostname }} is all done' - voice: Zarvox - delegate_to: localhost -''' -import platform - -from ansible.module_utils.basic import AnsibleModule - - -def say(module, executable, msg, voice): - cmd = [executable, msg] - if voice: - cmd.extend(('-v', voice)) - module.run_command(cmd, check_rc=True) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - msg=dict(required=True), - voice=dict(required=False), - ), - supports_check_mode=True - ) - - msg = module.params['msg'] - voice = module.params['voice'] - possibles = ('say', 'espeak', 'espeak-ng') - - if platform.system() != 'Darwin': - # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter - voice = None - - for possible in possibles: - executable = module.get_bin_path(possible) - if executable: - break - else: - module.fail_json(msg='Unable to find either %s' % ', '.join(possibles)) - - if module.check_mode: - module.exit_json(msg=msg, changed=False) - - say(module, executable, msg, voice) - - module.exit_json(msg=msg, changed=True) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/sendgrid.py b/ansible_collections/community/general/plugins/modules/notification/sendgrid.py deleted file mode 100644 index 2c349064..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/sendgrid.py +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Matt Makai -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: sendgrid -short_description: Sends an email with the SendGrid API -description: - - "Sends an email with a SendGrid account through their API, not through - the SMTP service." -notes: - - "This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails." - - "Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need an active SendGrid - account." 
- - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers - you must pip install sendgrid" - - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)" -requirements: - - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported) -options: - username: - type: str - description: - - Username for logging into the SendGrid account. - - Since 2.2 it is only required if I(api_key) is not supplied. - password: - type: str - description: - - Password that corresponds to the username. - - Since 2.2 it is only required if I(api_key) is not supplied. - from_address: - type: str - description: - - The address in the "from" field for the email. - required: true - to_addresses: - type: list - elements: str - description: - - A list with one or more recipient email addresses. - required: true - subject: - type: str - description: - - The desired subject for the email. - required: true - api_key: - type: str - description: - - Sendgrid API key to use instead of username/password. - cc: - type: list - elements: str - description: - - A list of email addresses to cc. - bcc: - type: list - elements: str - description: - - A list of email addresses to bcc. - attachments: - type: list - elements: path - description: - - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs). - from_name: - type: str - description: - - The name you want to appear in the from field, i.e 'John Doe'. - html_body: - description: - - Whether the body is html content that should be rendered. - type: bool - default: 'no' - headers: - type: dict - description: - - A dict to pass on as headers. - body: - type: str - description: - - The e-mail body content. - required: yes -author: "Matt Makai (@makaimc)" -''' - -EXAMPLES = r''' -- name: Send an email to a single recipient that the deployment was successful - community.general.sendgrid: - username: "{{ sendgrid_username }}" - password: "{{ sendgrid_password }}" - from_address: "ansible@mycompany.com" - to_addresses: - - "ops@mycompany.com" - subject: "Deployment success." - body: "The most recent Ansible deployment was successful." - delegate_to: localhost - -- name: Send an email to more than one recipient that the build failed - community.general.sendgrid: - username: "{{ sendgrid_username }}" - password: "{{ sendgrid_password }}" - from_address: "build@mycompany.com" - to_addresses: - - "ops@mycompany.com" - - "devteam@mycompany.com" - subject: "Build failure!." - body: "Unable to pull source repository from Git server." 
- delegate_to: localhost -''' - -# ======================================= -# sendgrid module support methods -# -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -SENDGRID_IMP_ERR = None -try: - import sendgrid - HAS_SENDGRID = True -except ImportError: - SENDGRID_IMP_ERR = traceback.format_exc() - HAS_SENDGRID = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.urls import fetch_url - - -def post_sendgrid_api(module, username, password, from_address, to_addresses, - subject, body, api_key=None, cc=None, bcc=None, attachments=None, - html_body=False, from_name=None, headers=None): - - if not HAS_SENDGRID: - SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" - AGENT = "Ansible" - data = {'api_user': username, 'api_key': password, - 'from': from_address, 'subject': subject, 'text': body} - encoded_data = urlencode(data) - to_addresses_api = '' - for recipient in to_addresses: - recipient = to_bytes(recipient, errors='surrogate_or_strict') - to_addresses_api += '&to[]=%s' % recipient - encoded_data += to_addresses_api - - headers = {'User-Agent': AGENT, - 'Content-type': 'application/x-www-form-urlencoded', - 'Accept': 'application/json'} - return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST') - else: - # Remove this check when adding Sendgrid API v3 support - if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"): - module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.") - - if api_key: - sg = sendgrid.SendGridClient(api_key) - else: - sg = sendgrid.SendGridClient(username, password) - - message = sendgrid.Mail() - message.set_subject(subject) - - for recip in to_addresses: - message.add_to(recip) - - if cc: - for recip in cc: - message.add_cc(recip) - if bcc: - for recip in bcc: - message.add_bcc(recip) - - if headers: - message.set_headers(headers) - - if attachments: - for f in attachments: - name = os.path.basename(f) - message.add_attachment(name, f) - - if from_name: - message.set_from('%s <%s.' 
% (from_name, from_address)) - else: - message.set_from(from_address) - - if html_body: - message.set_html(body) - else: - message.set_text(body) - - return sg.send(message) -# ======================================= -# Main -# - - -def main(): - module = AnsibleModule( - argument_spec=dict( - username=dict(required=False), - password=dict(required=False, no_log=True), - api_key=dict(required=False, no_log=True), - bcc=dict(required=False, type='list', elements='str'), - cc=dict(required=False, type='list', elements='str'), - headers=dict(required=False, type='dict'), - from_address=dict(required=True), - from_name=dict(required=False), - to_addresses=dict(required=True, type='list', elements='str'), - subject=dict(required=True), - body=dict(required=True), - html_body=dict(required=False, default=False, type='bool'), - attachments=dict(required=False, type='list', elements='path') - ), - supports_check_mode=True, - mutually_exclusive=[ - ['api_key', 'password'], - ['api_key', 'username'] - ], - required_together=[['username', 'password']], - ) - - username = module.params['username'] - password = module.params['password'] - api_key = module.params['api_key'] - bcc = module.params['bcc'] - cc = module.params['cc'] - headers = module.params['headers'] - from_name = module.params['from_name'] - from_address = module.params['from_address'] - to_addresses = module.params['to_addresses'] - subject = module.params['subject'] - body = module.params['body'] - html_body = module.params['html_body'] - attachments = module.params['attachments'] - - sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments] - - if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID: - reason = 'when using any of the following arguments: ' \ - 'api_key, bcc, cc, headers, from_name, html_body, attachments' - module.fail_json(msg=missing_required_lib('sendgrid', reason=reason), - exception=SENDGRID_IMP_ERR) - - response, info = post_sendgrid_api(module, username, password, - from_address, to_addresses, subject, body, attachments=attachments, - bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key) - - if not HAS_SENDGRID: - if info['status'] != 200: - module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg']) - else: - if response != 200: - module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message']) - - module.exit_json(msg=subject, changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/slack.py b/ansible_collections/community/general/plugins/modules/notification/slack.py deleted file mode 100644 index bdc839f9..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/slack.py +++ /dev/null @@ -1,488 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2020, Lee Goolsbee -# (c) 2020, Michal Middleton -# (c) 2017, Steve Pletcher -# (c) 2016, René Moser -# (c) 2015, Stefan Berggren -# (c) 2014, Ramon de la Fuente -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = """ -module: slack -short_description: Send Slack notifications -description: - - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration -author: "Ramon de la Fuente (@ramondelafuente)" -options: - domain: - type: str - description: - 
- Slack (sub)domain for your environment without protocol. (i.e. - C(example.slack.com)) In 1.8 and beyond, this is deprecated and may - be ignored. See token documentation for information. - token: - type: str - description: - - Slack integration token. This authenticates you to the slack service. - Make sure to use the correct type of token, depending on what method you use. - - "Webhook token: - Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In - 1.8 and above, ansible adapts to the new slack API where tokens look - like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens - are in the new format then slack will ignore any value of domain. If - the token is in the old format the domain is required. Ansible has no - control of when slack will get rid of the old API. When slack does - that the old format will stop working. ** Please keep in mind the tokens - are not the API tokens but are the webhook tokens. In slack these are - found in the webhook URL which are obtained under the apps and integrations. - The incoming webhooks can be added in that area. In some cases this may - be locked by your Slack admin and you must request access. It is there - that the incoming webhooks can be added. The key is on the end of the - URL given to you in that section." - - "WebAPI token: - Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-) - or C(xoxa-), eg. C(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id. - See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information." - required: true - msg: - type: str - description: - - Message to send. Note that the module does not handle escaping characters. - Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &) before sending. - See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). - thread_id: - description: - - Optional. Timestamp of parent message to thread this message. https://api.slack.com/docs/message-threading - type: str - message_id: - description: - - Optional. Message ID to edit, instead of posting a new message. - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)). - type: str - version_added: 1.2.0 - username: - type: str - description: - - This is the sender of the message. - default: "Ansible" - icon_url: - type: str - description: - - Url for the message sender's icon (default C(https://www.ansible.com/favicon.ico)) - default: https://www.ansible.com/favicon.ico - icon_emoji: - type: str - description: - - Emoji for the message sender. See Slack documentation for options. - (if I(icon_emoji) is set, I(icon_url) will not be used) - link_names: - type: int - description: - - Automatically create links for channels and usernames in I(msg). - default: 1 - choices: - - 1 - - 0 - parse: - type: str - description: - - Setting for the message parser at Slack - choices: - - 'full' - - 'none' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- type: bool
- default: 'yes'
- color:
- type: str
- description:
- - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
- - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', or any valid 3 digit or 6 digit hex color value.
- - Specifying value in hex is supported since Ansible 2.8.
- default: 'normal'
- attachments:
- type: list
- elements: dict
- description:
- - Define a list of attachments. This list mirrors the Slack JSON API.
- - For more information, see U(https://api.slack.com/docs/attachments).
- blocks:
- description:
- - Define a list of blocks. This list mirrors the Slack JSON API.
- - For more information, see U(https://api.slack.com/block-kit).
- type: list
- elements: dict
- version_added: 1.0.0
-"""
-
-EXAMPLES = """
-- name: Send notification message via Slack
- community.general.slack:
- token: thetoken/generatedby/slack
- msg: '{{ inventory_hostname }} completed'
- delegate_to: localhost
-
-- name: Send notification message via Slack all options
- community.general.slack:
- token: thetoken/generatedby/slack
- msg: '{{ inventory_hostname }} completed'
- channel: '#ansible'
- thread_id: '1539917263.000100'
- username: 'Ansible on {{ inventory_hostname }}'
- icon_url: http://www.example.com/some-image-file.png
- link_names: 0
- parse: 'none'
- delegate_to: localhost
-
-- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
- community.general.slack:
- token: thetoken/generatedby/slack
- msg: '{{ inventory_hostname }} is alive!'
- color: good
- username: ''
- icon_url: ''
-
-- name: Insert a color bar in front of the message with valid hex color value
- community.general.slack:
- token: thetoken/generatedby/slack
- msg: 'This message uses color in hex value'
- color: '#00aacc'
- username: ''
- icon_url: ''
-
-- name: Use the attachments API
- community.general.slack:
- token: thetoken/generatedby/slack
- attachments:
- - text: Display my system load on host A and B
- color: '#ff00dd'
- title: System load
- fields:
- - title: System A
- value: "load average: 0,74, 0,66, 0,63"
- short: True
- - title: System B
- value: 'load average: 5,16, 4,64, 2,43'
- short: True
-
-- name: Use the blocks API
- community.general.slack:
- token: thetoken/generatedby/slack
- blocks:
- - type: section
- text:
- type: mrkdwn
- text: |-
- *System load*
- Display my system load on host A and B
- - type: context
- elements:
- - type: mrkdwn
- text: |-
- *System A*
- load average: 0,74, 0,66, 0,63
- - type: mrkdwn
- text: |-
- *System B*
- load average: 5,16, 4,64, 2,43
-
-- name: Send a message with a link using Slack markup
- community.general.slack:
- token: thetoken/generatedby/slack
- msg: We sent this message using <https://www.ansible.com|ansible>!
-
-- name: Send a message with angle brackets and ampersands
- community.general.slack:
- token: thetoken/generatedby/slack
- msg: This message has <brackets> & ampersands in plain text.
-
-- name: Initial Threaded Slack message
- community.general.slack:
- channel: '#ansible'
- token: xoxb-1234-56789abcdefghijklmnop
- msg: 'Starting a thread with my initial post.'
- register: slack_response
-- name: Add more info to thread
- community.general.slack:
- channel: '#ansible'
- token: xoxb-1234-56789abcdefghijklmnop
- thread_id: "{{ slack_response['ts'] }}"
- color: good
- msg: 'And this is my threaded response!'
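(Editor's aside: the threading example above and the editing example below drive Slack's Web API through this module. For orientation, here is a minimal standalone Python sketch of the same calls; it is not part of the module, TOKEN and CHANNEL are hypothetical placeholders, and the token is assumed to be a bot token with the chat:write scope. C(chat.postMessage) returns the C(ts) that the module's C(thread_id) and C(message_id) options then reference.)

# Minimal sketch: post a message, reply in its thread, then edit the original.
import json
from urllib.request import Request, urlopen

TOKEN = "xoxb-REPLACE-ME"   # hypothetical bot token
CHANNEL = "C0123456789"     # hypothetical channel ID


def slack_call(method, payload):
    """POST a JSON payload to one Slack Web API method and decode the reply."""
    req = Request(
        "https://slack.com/api/" + method,
        data=json.dumps(payload).encode("utf-8"),
        headers={
            "Authorization": "Bearer " + TOKEN,
            "Content-Type": "application/json; charset=utf-8",
        },
    )
    with urlopen(req) as resp:
        return json.load(resp)


first = slack_call("chat.postMessage", {"channel": CHANNEL, "text": "Deploying something..."})
assert first["ok"], first.get("error")

# thread_ts is the ts of the parent message, which is what thread_id carries.
slack_call("chat.postMessage",
           {"channel": CHANNEL, "text": "Still going...", "thread_ts": first["ts"]})

# ts identifies the message to edit, which is what message_id carries.
slack_call("chat.update",
           {"channel": first["channel"], "ts": first["ts"], "text": "Deployment complete!"})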
-
-- name: Send a message to be edited later on
- community.general.slack:
- token: thetoken/generatedby/slack
- channel: '#ansible'
- msg: Deploying something...
- register: slack_response
-- name: Edit message
- community.general.slack:
- token: thetoken/generatedby/slack
- channel: "{{ slack_response.channel }}"
- msg: Deployment complete!
- message_id: "{{ slack_response.ts }}"
-"""
-
-import re
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import fetch_url
-
-OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
-SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
-SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage'
-SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update'
-SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history'
-
-# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call.
-# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
-escape_table = {
- '"': "\\\"",
- "'": "\\'",
-}
-
-
-def is_valid_hex_color(color_choice):
- if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice):
- return True
- return False
-
-
-def escape_quotes(text):
- """Backslash any quotes within text."""
- return "".join(escape_table.get(c, c) for c in text)
-
-
-def recursive_escape_quotes(obj, keys):
- """Recursively escape quotes inside supplied keys inside block kit objects"""
- if isinstance(obj, dict):
- escaped = {}
- for k, v in obj.items():
- if isinstance(v, str) and k in keys:
- escaped[k] = escape_quotes(v)
- else:
- escaped[k] = recursive_escape_quotes(v, keys)
- elif isinstance(obj, list):
- escaped = [recursive_escape_quotes(v, keys) for v in obj]
- else:
- escaped = obj
- return escaped
-
-
-def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names,
- parse, color, attachments, blocks, message_id):
- payload = {}
- if color == "normal" and text is not None:
- payload = dict(text=escape_quotes(text))
- elif text is not None:
- # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
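- # Slack renders the color value as a vertical bar on the left edge of the attachment text.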
- payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])]) - if channel is not None: - if channel.startswith(('#', '@', 'C0')): - payload['channel'] = channel - else: - payload['channel'] = '#' + channel - if thread_id is not None: - payload['thread_ts'] = thread_id - if username is not None: - payload['username'] = username - if icon_emoji is not None: - payload['icon_emoji'] = icon_emoji - else: - payload['icon_url'] = icon_url - if link_names is not None: - payload['link_names'] = link_names - if parse is not None: - payload['parse'] = parse - if message_id is not None: - payload['ts'] = message_id - - if attachments is not None: - if 'attachments' not in payload: - payload['attachments'] = [] - - if attachments is not None: - attachment_keys_to_escape = [ - 'title', - 'text', - 'author_name', - 'pretext', - 'fallback', - ] - for attachment in attachments: - for key in attachment_keys_to_escape: - if key in attachment: - attachment[key] = escape_quotes(attachment[key]) - - if 'fallback' not in attachment: - attachment['fallback'] = attachment['text'] - - payload['attachments'].append(attachment) - - if blocks is not None: - block_keys_to_escape = [ - 'text', - 'alt_text' - ] - payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape) - - return payload - - -def get_slack_message(module, token, channel, ts): - headers = { - 'Content-Type': 'application/json; charset=UTF-8', - 'Accept': 'application/json', - 'Authorization': 'Bearer ' + token - } - qs = urlencode({ - 'channel': channel, - 'ts': ts, - 'limit': 1, - 'inclusive': 'true', - }) - url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs - response, info = fetch_url(module=module, url=url, headers=headers, method='GET') - if info['status'] != 200: - module.fail_json(msg="failed to get slack message") - data = module.from_json(response.read()) - if len(data['messages']) < 1: - module.fail_json(msg="no messages matching ts: %s" % ts) - if len(data['messages']) > 1: - module.fail_json(msg="more than 1 message matching ts: %s" % ts) - return data['messages'][0] - - -def do_notify_slack(module, domain, token, payload): - use_webapi = False - if token.count('/') >= 2: - # New style webhook token - slack_uri = SLACK_INCOMING_WEBHOOK % token - elif re.match(r'^xox[abp]-\S+$', token): - slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI - use_webapi = True - else: - if not domain: - module.fail_json(msg="Slack has updated its webhook API. 
You need to specify a token of the form " - "XXXX/YYYY/ZZZZ in your playbook") - slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token) - - headers = { - 'Content-Type': 'application/json; charset=UTF-8', - 'Accept': 'application/json', - } - if use_webapi: - headers['Authorization'] = 'Bearer ' + token - - data = module.jsonify(payload) - response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data) - - if info['status'] != 200: - if use_webapi: - obscured_incoming_webhook = slack_uri - else: - obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]' - module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg'])) - - # each API requires different handling - if use_webapi: - return module.from_json(response.read()) - else: - return {'webhook': 'ok'} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - domain=dict(type='str'), - token=dict(type='str', required=True, no_log=True), - msg=dict(type='str'), - channel=dict(type='str'), - thread_id=dict(type='str'), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), - icon_emoji=dict(type='str'), - link_names=dict(type='int', default=1, choices=[0, 1]), - parse=dict(type='str', choices=['none', 'full']), - validate_certs=dict(default=True, type='bool'), - color=dict(type='str', default='normal'), - attachments=dict(type='list', elements='dict'), - blocks=dict(type='list', elements='dict'), - message_id=dict(type='str'), - ), - supports_check_mode=True, - ) - - domain = module.params['domain'] - token = module.params['token'] - text = module.params['msg'] - channel = module.params['channel'] - thread_id = module.params['thread_id'] - username = module.params['username'] - icon_url = module.params['icon_url'] - icon_emoji = module.params['icon_emoji'] - link_names = module.params['link_names'] - parse = module.params['parse'] - color = module.params['color'] - attachments = module.params['attachments'] - blocks = module.params['blocks'] - message_id = module.params['message_id'] - - color_choices = ['normal', 'good', 'warning', 'danger'] - if color not in color_choices and not is_valid_hex_color(color): - module.fail_json(msg="Color value specified should be either one of %r " - "or any valid hex value with length 3 or 6." % color_choices) - - changed = True - - # if updating an existing message, we can check if there's anything to update - if message_id is not None: - changed = False - msg = get_slack_message(module, token, channel, message_id) - for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'): - if msg.get(key) != module.params.get(key): - changed = True - break - # if check mode is active, we shouldn't do anything regardless. - # if changed=False, we don't need to do anything, so don't do it. 
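- # The early exit below still returns the existing message's ts and channel,
- # so follow-up tasks can keep chaining on the registered result.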
- if module.check_mode or not changed:
- module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel'])
- elif module.check_mode:
- module.exit_json(changed=changed)
-
- payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names,
- parse, color, attachments, blocks, message_id)
- slack_response = do_notify_slack(module, domain, token, payload)
-
- if 'ok' in slack_response:
- # Evaluate WebAPI response
- if slack_response['ok']:
- # return payload as a string for backwards compatibility
- payload_json = module.jsonify(payload)
- module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'],
- api=slack_response, payload=payload_json)
- else:
- module.fail_json(msg="Slack API error", error=slack_response['error'])
- else:
- # Exit with plain OK from WebHook, since we don't have more information
- # If we get 200 from webhook, the only answer is OK
- module.exit_json(msg="OK")
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/notification/syslogger.py b/ansible_collections/community/general/plugins/modules/notification/syslogger.py
deleted file mode 100644
index 7627f359..00000000
--- a/ansible_collections/community/general/plugins/modules/notification/syslogger.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2017, Tim Rightnour
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: syslogger
-short_description: Log messages in the syslog
-description:
- - Uses syslog to add log entries to the host.
-options:
- msg:
- type: str
- description:
- - This is the message to place in syslog.
- required: True
- priority:
- type: str
- description:
- - Set the log priority.
- choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ]
- default: "info"
- facility:
- type: str
- description:
- - Set the log facility.
- choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news",
- "uucp", "cron", "syslog", "local0", "local1", "local2",
- "local3", "local4", "local5", "local6", "local7" ]
- default: "daemon"
- log_pid:
- description:
- - Log the PID in brackets.
- type: bool
- default: False
- ident:
- description:
- - Specify the name of the application sending the log to syslog.
- type: str
- default: 'ansible_syslogger'
- version_added: '0.2.0'
-author:
- - Tim Rightnour (@garbled1)
-'''
-
-EXAMPLES = r'''
-- name: Simple Usage
- community.general.syslogger:
- msg: "I will end up as daemon.info"
-
-- name: Send a log message with err priority and user facility with log_pid
- community.general.syslogger:
- msg: "Hello from Ansible"
- priority: "err"
- facility: "user"
- log_pid: true
-
-- name: Specify the name of the application sending the log message
- community.general.syslogger:
- ident: "MyApp"
- msg: "I want to believe"
- priority: "alert"
-'''
-
-RETURN = r'''
-ident:
- description: Name of the application sending the message to the log
- returned: always
- type: str
- sample: "ansible_syslogger"
- version_added: '0.2.0'
-priority:
- description: Priority level
- returned: always
- type: str
- sample: "info"
-facility:
- description: Syslog facility
- returned: always
- type: str
- sample: "daemon"
-log_pid:
- description: Log PID status
- returned: always
- type: bool
- sample: True
-msg:
- description: Message sent to syslog
- returned: always
- type: str
- sample: "Hello from Ansible"
-'''
-
-import syslog
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-
-def get_facility(facility):
- return {
- 'kern': syslog.LOG_KERN,
- 'user': syslog.LOG_USER,
- 'mail': syslog.LOG_MAIL,
- 'daemon': syslog.LOG_DAEMON,
- 'auth': syslog.LOG_AUTH,
- 'lpr': syslog.LOG_LPR,
- 'news': syslog.LOG_NEWS,
- 'uucp': syslog.LOG_UUCP,
- 'cron': syslog.LOG_CRON,
- 'syslog': syslog.LOG_SYSLOG,
- 'local0': syslog.LOG_LOCAL0,
- 'local1': syslog.LOG_LOCAL1,
- 'local2': syslog.LOG_LOCAL2,
- 'local3': syslog.LOG_LOCAL3,
- 'local4': syslog.LOG_LOCAL4,
- 'local5': syslog.LOG_LOCAL5,
- 'local6': syslog.LOG_LOCAL6,
- 'local7': syslog.LOG_LOCAL7
- }.get(facility, syslog.LOG_DAEMON)
-
-
-def get_priority(priority):
- return {
- 'emerg': syslog.LOG_EMERG,
- 'alert': syslog.LOG_ALERT,
- 'crit': syslog.LOG_CRIT,
- 'err': syslog.LOG_ERR,
- 'warning': syslog.LOG_WARNING,
- 'notice': syslog.LOG_NOTICE,
- 'info': syslog.LOG_INFO,
- 'debug': syslog.LOG_DEBUG
- }.get(priority, syslog.LOG_INFO)
-
-
-def main():
- # define the available arguments/parameters that a user can pass to
- # the module
- module_args = dict(
- ident=dict(type='str', default='ansible_syslogger'),
- msg=dict(type='str', required=True),
- priority=dict(type='str', required=False,
- choices=["emerg", "alert", "crit", "err", "warning",
- "notice", "info", "debug"],
- default='info'),
- facility=dict(type='str', required=False,
- choices=["kern", "user", "mail", "daemon", "auth",
- "lpr", "news", "uucp", "cron", "syslog",
- "local0", "local1", "local2", "local3",
- "local4", "local5", "local6", "local7"],
- default='daemon'),
- log_pid=dict(type='bool', required=False, default=False)
- )
-
- module = AnsibleModule(
- argument_spec=module_args,
- )
-
- result = dict(
- changed=False,
- ident=module.params['ident'],
- priority=module.params['priority'],
- facility=module.params['facility'],
- log_pid=module.params['log_pid'],
- msg=module.params['msg']
- )
-
- # do the logging
- try:
- syslog.openlog(module.params['ident'],
- syslog.LOG_PID if module.params['log_pid'] else 0,
- get_facility(module.params['facility']))
- syslog.syslog(get_priority(module.params['priority']),
- module.params['msg'])
- syslog.closelog()
- result['changed'] = True
-
- except Exception as exc:
- module.fail_json(error='Failed to write to syslog: %s' % to_native(exc),
exception=traceback.format_exc(), **result)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/notification/telegram.py b/ansible_collections/community/general/plugins/modules/notification/telegram.py
deleted file mode 100644
index 4960874d..00000000
--- a/ansible_collections/community/general/plugins/modules/notification/telegram.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2016, Artem Feofanov
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-
-module: telegram
-author:
- - "Artem Feofanov (@tyouxa)"
- - "Nikolai Lomov (@lomserman)"
-
-short_description: Module for sending notifications via Telegram
-
-description:
- - Send notifications via a Telegram bot, to a verified group or user.
- - Also, the user may use any other Telegram bot API method if the I(api_method) argument is specified.
-notes:
- - You will require a Telegram account and a Telegram bot to use this module.
-options:
- token:
- type: str
- description:
- - Token identifying your Telegram bot.
- required: true
- api_method:
- type: str
- description:
- - Bot API method.
- - For reference, see U(https://core.telegram.org/bots/api).
- default: SendMessage
- version_added: 2.0.0
- api_args:
- type: dict
- description:
- - Any parameters for the method.
- - For reference to default method, C(SendMessage), see U(https://core.telegram.org/bots/api#sendmessage).
- version_added: 2.0.0
-
-'''
-
-EXAMPLES = """
-
-- name: Send a notification to Telegram
- community.general.telegram:
- token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
- api_args:
- chat_id: 000000
- parse_mode: "markdown"
- text: "Your precious application has been deployed: https://example.com"
- disable_web_page_preview: True
- disable_notification: True
-
-- name: Forward message to someone
- community.general.telegram:
- token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
- api_method: forwardMessage
- api_args:
- chat_id: 000000
- from_chat_id: 111111
- disable_notification: True
- message_id: '{{ saved_msg_id }}'
-"""
-
-RETURN = """
-
-msg:
- description: The message you attempted to send
- returned: success
- type: str
- sample: "Ansible task finished"
-telegram_error:
- description: Error message returned by the Telegram API
- returned: failure
- type: str
- sample: "Bad Request: message text is empty"
-"""
-
-import json
-
-from ansible.module_utils.basic import AnsibleModule
-# noinspection PyUnresolvedReferences
-from ansible.module_utils.six.moves.urllib.parse import quote
-from ansible.module_utils.urls import fetch_url
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- token=dict(type='str', required=True, no_log=True),
- api_args=dict(type='dict'),
- api_method=dict(type="str", default="SendMessage"),
- ),
- supports_check_mode=True
- )
-
- token = quote(module.params.get('token'))
- api_args = module.params.get('api_args') or {}
- api_method = module.params.get('api_method')
- # filling backward compatibility args
- api_args['chat_id'] = api_args.get('chat_id')
- api_args['parse_mode'] = api_args.get('parse_mode')
- api_args['text'] = api_args.get('text')
-
- if api_args['parse_mode'] == 'plain':
- del api_args['parse_mode']
-
- url = 'https://api.telegram.org/bot{token}/{api_method}'.format(token=token, api_method=api_method)
-
- if module.check_mode:
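- # In check mode, skip the Telegram API call and just report no change.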
- module.exit_json(changed=False)
-
- response, info = fetch_url(module, url, method="POST", data=json.dumps(api_args),
- headers={'Content-Type': 'application/json'})
- if info['status'] == 200:
- module.exit_json(changed=True)
- elif info['status'] == -1:
- # SSL errors, connection problems, etc.
- module.fail_json(msg="Failed to send message", info=info, response=response)
- else:
- body = json.loads(info['body'])
- module.fail_json(
- msg="Failed to send message, return status = {status}\n"
- "url = {api_url}\n"
- "api_args = {api_args}".format(
- status=info['status'], api_url=url, api_args=api_args
- ),
- telegram_error=body['description'],
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/notification/twilio.py b/ansible_collections/community/general/plugins/modules/notification/twilio.py
deleted file mode 100644
index 88851a6a..00000000
--- a/ansible_collections/community/general/plugins/modules/notification/twilio.py
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2015, Matt Makai
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: twilio
-short_description: Sends a text message to a mobile phone through Twilio
-description:
- - Sends a text message to a phone number through the Twilio messaging API.
-notes:
- - This module is non-idempotent because it sends a text message through the
- external API. It is idempotent only in the case that the module fails.
- - Like the other notification modules, this one requires an external
- dependency to work. In this case, you'll need a Twilio account with
- a purchased or verified phone number to send the text message.
-options:
- account_sid:
- type: str
- description:
- user's Twilio account SID found on the account page
- required: true
- auth_token:
- type: str
- description: user's Twilio authentication token
- required: true
- msg:
- type: str
- description:
- the body of the text message
- required: true
- to_numbers:
- type: list
- elements: str
- description:
- one or more phone numbers to send the text message to,
- format +15551112222
- required: true
- aliases: [ to_number ]
- from_number:
- type: str
- description:
- the Twilio number to send the text message from, format +15551112222
- required: true
- media_url:
- type: str
- description:
- a URL with a picture, video or sound clip to send with an MMS
- (multimedia message) instead of a plain SMS
- required: false
-
-author: "Matt Makai (@makaimc)"
-'''
-
-EXAMPLES = '''
-# send an SMS about the build status to (555) 303 5681
-# note: replace account_sid and auth_token values with your credentials
-# and you have to have the 'from_number' on your Twilio account
-- name: Send a text message to a mobile phone through Twilio
- community.general.twilio:
- msg: All servers with webserver role are now configured.
- account_sid: ACXXXXXXXXXXXXXXXXX
- auth_token: ACXXXXXXXXXXXXXXXXX
- from_number: +15552014545
- to_number: +15553035681
- delegate_to: localhost
-
-# send an SMS to multiple phone numbers about the deployment
-# note: replace account_sid and auth_token values with your credentials
-# and you have to have the 'from_number' on your Twilio account
-- name: Send a text message to a mobile phone through Twilio
- community.general.twilio:
- msg: This server configuration is now complete.
- account_sid: ACXXXXXXXXXXXXXXXXX - auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15553258899 - to_numbers: - - +15551113232 - - +12025551235 - - +19735559010 - delegate_to: localhost - -# send an MMS to a single recipient with an update on the deployment -# and an image of the results -# note: replace account_sid and auth_token values with your credentials -# and you have to have the 'from_number' on your Twilio account -- name: Send a text message to a mobile phone through Twilio - community.general.twilio: - msg: Deployment complete! - account_sid: ACXXXXXXXXXXXXXXXXX - auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15552014545 - to_number: +15553035681 - media_url: https://demo.twilio.com/logo.png - delegate_to: localhost -''' - -# ======================================= -# twilio module support methods -# -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -def post_twilio_api(module, account_sid, auth_token, msg, from_number, - to_number, media_url=None): - URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ - % (account_sid,) - AGENT = "Ansible" - - data = {'From': from_number, 'To': to_number, 'Body': msg} - if media_url: - data['MediaUrl'] = media_url - encoded_data = urlencode(data) - - headers = {'User-Agent': AGENT, - 'Content-type': 'application/x-www-form-urlencoded', - 'Accept': 'application/json', - } - - # Hack module params to have the Basic auth params that fetch_url expects - module.params['url_username'] = account_sid.replace('\n', '') - module.params['url_password'] = auth_token.replace('\n', '') - - return fetch_url(module, URI, data=encoded_data, headers=headers) - - -# ======================================= -# Main -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_sid=dict(required=True), - auth_token=dict(required=True, no_log=True), - msg=dict(required=True), - from_number=dict(required=True), - to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'), - media_url=dict(default=None, required=False), - ), - supports_check_mode=True - ) - - account_sid = module.params['account_sid'] - auth_token = module.params['auth_token'] - msg = module.params['msg'] - from_number = module.params['from_number'] - to_numbers = module.params['to_numbers'] - media_url = module.params['media_url'] - - for number in to_numbers: - r, info = post_twilio_api(module, account_sid, auth_token, msg, - from_number, number, media_url) - if info['status'] not in [200, 201]: - body_message = "unknown error" - if 'body' in info: - body = module.from_json(info['body']) - body_message = body['message'] - module.fail_json(msg="unable to send message to %s: %s" % (number, body_message)) - - module.exit_json(msg=msg, changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/notification/typetalk.py b/ansible_collections/community/general/plugins/modules/notification/typetalk.py deleted file mode 100644 index 6f8e4e8b..00000000 --- a/ansible_collections/community/general/plugins/modules/notification/typetalk.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: 
typetalk -short_description: Send a message to typetalk -description: - - Send a message to typetalk using typetalk API -options: - client_id: - type: str - description: - - OAuth2 client ID - required: true - client_secret: - type: str - description: - - OAuth2 client secret - required: true - topic: - type: int - description: - - topic id to post message - required: true - msg: - type: str - description: - - message body - required: true -requirements: [ json ] -author: "Takashi Someda (@tksmd)" -''' - -EXAMPLES = ''' -- name: Send a message to typetalk - community.general.typetalk: - client_id: 12345 - client_secret: 12345 - topic: 1 - msg: install completed -''' - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url, ConnectionError - - -def do_request(module, url, params, headers=None): - data = urlencode(params) - if headers is None: - headers = dict() - headers = dict(headers, **{ - 'User-Agent': 'Ansible/typetalk module', - }) - r, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] != 200: - exc = ConnectionError(info['msg']) - exc.code = info['status'] - raise exc - return r - - -def get_access_token(module, client_id, client_secret): - params = { - 'client_id': client_id, - 'client_secret': client_secret, - 'grant_type': 'client_credentials', - 'scope': 'topic.post' - } - res = do_request(module, 'https://typetalk.com/oauth2/access_token', params) - return json.load(res)['access_token'] - - -def send_message(module, client_id, client_secret, topic, msg): - """ - send message to typetalk - """ - try: - access_token = get_access_token(module, client_id, client_secret) - url = 'https://typetalk.com/api/v1/topics/%d' % topic - headers = { - 'Authorization': 'Bearer %s' % access_token, - } - do_request(module, url, {'message': msg}, headers) - return True, {'access_token': access_token} - except ConnectionError as e: - return False, e - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - client_id=dict(required=True), - client_secret=dict(required=True, no_log=True), - topic=dict(required=True, type='int'), - msg=dict(required=True), - ), - supports_check_mode=False - ) - - if not json: - module.fail_json(msg="json module is required") - - client_id = module.params["client_id"] - client_secret = module.params["client_secret"] - topic = module.params["topic"] - msg = module.params["msg"] - - res, error = send_message(module, client_id, client_secret, topic, msg) - if not res: - module.fail_json(msg='fail to send message with response code %s' % error.code) - - module.exit_json(changed=True, topic=topic, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/npm.py b/ansible_collections/community/general/plugins/modules/npm.py deleted file mode 120000 index 4cf0634e..00000000 --- a/ansible_collections/community/general/plugins/modules/npm.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/npm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/nsupdate.py b/ansible_collections/community/general/plugins/modules/nsupdate.py deleted file mode 120000 index 1e2e4a55..00000000 --- a/ansible_collections/community/general/plugins/modules/nsupdate.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/nsupdate.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oci_vcn.py 
b/ansible_collections/community/general/plugins/modules/oci_vcn.py deleted file mode 120000 index f2154001..00000000 --- a/ansible_collections/community/general/plugins/modules/oci_vcn.py +++ /dev/null @@ -1 +0,0 @@ -cloud/oracle/oci_vcn.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/odbc.py b/ansible_collections/community/general/plugins/modules/odbc.py deleted file mode 120000 index aba60d76..00000000 --- a/ansible_collections/community/general/plugins/modules/odbc.py +++ /dev/null @@ -1 +0,0 @@ -database/misc/odbc.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/office_365_connector_card.py b/ansible_collections/community/general/plugins/modules/office_365_connector_card.py deleted file mode 120000 index 40f93f2a..00000000 --- a/ansible_collections/community/general/plugins/modules/office_365_connector_card.py +++ /dev/null @@ -1 +0,0 @@ -notification/office_365_connector_card.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ohai.py b/ansible_collections/community/general/plugins/modules/ohai.py deleted file mode 120000 index a019d66a..00000000 --- a/ansible_collections/community/general/plugins/modules/ohai.py +++ /dev/null @@ -1 +0,0 @@ -system/ohai.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/omapi_host.py b/ansible_collections/community/general/plugins/modules/omapi_host.py deleted file mode 120000 index 987f4811..00000000 --- a/ansible_collections/community/general/plugins/modules/omapi_host.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/omapi_host.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/one_host.py b/ansible_collections/community/general/plugins/modules/one_host.py deleted file mode 120000 index 1a261a87..00000000 --- a/ansible_collections/community/general/plugins/modules/one_host.py +++ /dev/null @@ -1 +0,0 @@ -cloud/opennebula/one_host.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/one_image.py b/ansible_collections/community/general/plugins/modules/one_image.py deleted file mode 120000 index 6c594622..00000000 --- a/ansible_collections/community/general/plugins/modules/one_image.py +++ /dev/null @@ -1 +0,0 @@ -cloud/opennebula/one_image.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/one_image_info.py b/ansible_collections/community/general/plugins/modules/one_image_info.py deleted file mode 120000 index a15a2930..00000000 --- a/ansible_collections/community/general/plugins/modules/one_image_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/opennebula/one_image_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/one_service.py b/ansible_collections/community/general/plugins/modules/one_service.py deleted file mode 120000 index ca31a43b..00000000 --- a/ansible_collections/community/general/plugins/modules/one_service.py +++ /dev/null @@ -1 +0,0 @@ -cloud/opennebula/one_service.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/one_template.py b/ansible_collections/community/general/plugins/modules/one_template.py deleted file mode 120000 index 13e3710f..00000000 --- a/ansible_collections/community/general/plugins/modules/one_template.py +++ /dev/null @@ -1 +0,0 @@ -cloud/opennebula/one_template.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/one_vm.py b/ansible_collections/community/general/plugins/modules/one_vm.py deleted file mode 120000 index 20c25f3e..00000000 --- a/ansible_collections/community/general/plugins/modules/one_vm.py +++ /dev/null @@ -1 +0,0 @@ -cloud/opennebula/one_vm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py b/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py deleted file mode 120000 index 28bed1ce..00000000 --- a/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py +++ /dev/null @@ -1 +0,0 @@ -cloud/oneandone/oneandone_firewall_policy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py b/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py deleted file mode 120000 index 16b03a05..00000000 --- a/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py +++ /dev/null @@ -1 +0,0 @@ -cloud/oneandone/oneandone_load_balancer.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py b/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py deleted file mode 120000 index 51aa7c42..00000000 --- a/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py +++ /dev/null @@ -1 +0,0 @@ -cloud/oneandone/oneandone_monitoring_policy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneandone_private_network.py b/ansible_collections/community/general/plugins/modules/oneandone_private_network.py deleted file mode 120000 index 9c2d3d4c..00000000 --- a/ansible_collections/community/general/plugins/modules/oneandone_private_network.py +++ /dev/null @@ -1 +0,0 @@ -cloud/oneandone/oneandone_private_network.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py b/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py deleted file mode 120000 index 9548070c..00000000 --- a/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py +++ /dev/null @@ -1 +0,0 @@ -cloud/oneandone/oneandone_public_ip.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneandone_server.py b/ansible_collections/community/general/plugins/modules/oneandone_server.py deleted file mode 120000 index 27e2b75d..00000000 --- a/ansible_collections/community/general/plugins/modules/oneandone_server.py +++ /dev/null @@ -1 +0,0 @@ -cloud/oneandone/oneandone_server.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/onepassword_info.py b/ansible_collections/community/general/plugins/modules/onepassword_info.py deleted file mode 120000 index 52b921d1..00000000 --- a/ansible_collections/community/general/plugins/modules/onepassword_info.py +++ /dev/null @@ -1 +0,0 @@ -identity/onepassword_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py b/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py deleted file mode 120000 index caa392ec..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py +++ /dev/null @@ -1 +0,0 @@ 
-remote_management/oneview/oneview_datacenter_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py b/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py deleted file mode 120000 index 85ef34a6..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_enclosure_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py deleted file mode 120000 index 31f29bb1..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_ethernet_network.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py deleted file mode 120000 index 05a1dae8..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_ethernet_network_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_fc_network.py b/ansible_collections/community/general/plugins/modules/oneview_fc_network.py deleted file mode 120000 index 5b7bb102..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_fc_network.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_fc_network.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py deleted file mode 120000 index 365af8ff..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_fc_network_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py deleted file mode 120000 index 6bf23426..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_fcoe_network.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py deleted file mode 120000 index d136b715..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_fcoe_network_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py deleted file mode 120000 index f8fe67d0..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_logical_interconnect_group.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py deleted file mode 120000 index 281aa277..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_logical_interconnect_group_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_network_set.py b/ansible_collections/community/general/plugins/modules/oneview_network_set.py deleted file mode 120000 index 6e497e03..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_network_set.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_network_set.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py b/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py deleted file mode 120000 index f15ce4a5..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_network_set_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_san_manager.py b/ansible_collections/community/general/plugins/modules/oneview_san_manager.py deleted file mode 120000 index b49c8d7b..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_san_manager.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_san_manager.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py b/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py deleted file mode 120000 index a34e51de..00000000 --- a/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_san_manager_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/online_server_info.py b/ansible_collections/community/general/plugins/modules/online_server_info.py deleted file mode 120000 index cc32ccc9..00000000 --- a/ansible_collections/community/general/plugins/modules/online_server_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/online/online_server_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/online_user_info.py b/ansible_collections/community/general/plugins/modules/online_user_info.py deleted file mode 120000 index d21181f4..00000000 --- a/ansible_collections/community/general/plugins/modules/online_user_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/online/online_user_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/open_iscsi.py b/ansible_collections/community/general/plugins/modules/open_iscsi.py deleted file mode 120000 index 14e2424d..00000000 --- a/ansible_collections/community/general/plugins/modules/open_iscsi.py +++ /dev/null @@ -1 +0,0 @@ -system/open_iscsi.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/openbsd_pkg.py b/ansible_collections/community/general/plugins/modules/openbsd_pkg.py deleted file mode 120000 index 64aa4143..00000000 --- a/ansible_collections/community/general/plugins/modules/openbsd_pkg.py 
+++ /dev/null @@ -1 +0,0 @@ -packaging/os/openbsd_pkg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/opendj_backendprop.py b/ansible_collections/community/general/plugins/modules/opendj_backendprop.py deleted file mode 120000 index 1303b013..00000000 --- a/ansible_collections/community/general/plugins/modules/opendj_backendprop.py +++ /dev/null @@ -1 +0,0 @@ -identity/opendj/opendj_backendprop.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/openwrt_init.py b/ansible_collections/community/general/plugins/modules/openwrt_init.py deleted file mode 120000 index 81c41bff..00000000 --- a/ansible_collections/community/general/plugins/modules/openwrt_init.py +++ /dev/null @@ -1 +0,0 @@ -system/openwrt_init.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/opkg.py b/ansible_collections/community/general/plugins/modules/opkg.py deleted file mode 120000 index 2136d3b1..00000000 --- a/ansible_collections/community/general/plugins/modules/opkg.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/opkg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/osx_defaults.py b/ansible_collections/community/general/plugins/modules/osx_defaults.py deleted file mode 120000 index 7c64eb1d..00000000 --- a/ansible_collections/community/general/plugins/modules/osx_defaults.py +++ /dev/null @@ -1 +0,0 @@ -system/osx_defaults.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py b/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py deleted file mode 120000 index 0a00c885..00000000 --- a/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py +++ /dev/null @@ -1 +0,0 @@ -cloud/ovh/ovh_ip_failover.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py b/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py deleted file mode 120000 index c3bd14ea..00000000 --- a/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py +++ /dev/null @@ -1 +0,0 @@ -cloud/ovh/ovh_ip_loadbalancing_backend.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py b/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py deleted file mode 120000 index f87356f8..00000000 --- a/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py +++ /dev/null @@ -1 +0,0 @@ -cloud/ovh/ovh_monthly_billing.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py b/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py deleted file mode 120000 index db03a610..00000000 --- a/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py +++ /dev/null @@ -1 +0,0 @@ -clustering/pacemaker_cluster.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/packet_device.py b/ansible_collections/community/general/plugins/modules/packet_device.py deleted file mode 120000 index fa63af28..00000000 --- a/ansible_collections/community/general/plugins/modules/packet_device.py +++ /dev/null @@ -1 +0,0 @@ -cloud/packet/packet_device.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py b/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py deleted file mode 120000 index cc460173..00000000 --- a/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py +++ /dev/null @@ -1 +0,0 @@ -cloud/packet/packet_ip_subnet.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/packet_project.py b/ansible_collections/community/general/plugins/modules/packet_project.py deleted file mode 120000 index ba0b4335..00000000 --- a/ansible_collections/community/general/plugins/modules/packet_project.py +++ /dev/null @@ -1 +0,0 @@ -cloud/packet/packet_project.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/packet_sshkey.py b/ansible_collections/community/general/plugins/modules/packet_sshkey.py deleted file mode 120000 index 91c25408..00000000 --- a/ansible_collections/community/general/plugins/modules/packet_sshkey.py +++ /dev/null @@ -1 +0,0 @@ -cloud/packet/packet_sshkey.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/packet_volume.py b/ansible_collections/community/general/plugins/modules/packet_volume.py deleted file mode 120000 index 2c386d29..00000000 --- a/ansible_collections/community/general/plugins/modules/packet_volume.py +++ /dev/null @@ -1 +0,0 @@ -cloud/packet/packet_volume.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py b/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py deleted file mode 120000 index 32f42457..00000000 --- a/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py +++ /dev/null @@ -1 +0,0 @@ -cloud/packet/packet_volume_attachment.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pacman.py b/ansible_collections/community/general/plugins/modules/pacman.py deleted file mode 120000 index 186f0ee8..00000000 --- a/ansible_collections/community/general/plugins/modules/pacman.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pacman.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pacman_key.py b/ansible_collections/community/general/plugins/modules/pacman_key.py deleted file mode 120000 index ac0f4482..00000000 --- a/ansible_collections/community/general/plugins/modules/pacman_key.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pacman_key.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pagerduty.py b/ansible_collections/community/general/plugins/modules/pagerduty.py deleted file mode 120000 index 74600da5..00000000 --- a/ansible_collections/community/general/plugins/modules/pagerduty.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/pagerduty.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_alert.py b/ansible_collections/community/general/plugins/modules/pagerduty_alert.py deleted file mode 120000 index 3f58a04e..00000000 --- a/ansible_collections/community/general/plugins/modules/pagerduty_alert.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/pagerduty_alert.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_change.py b/ansible_collections/community/general/plugins/modules/pagerduty_change.py deleted file mode 120000 index 3c1ee647..00000000 --- 
a/ansible_collections/community/general/plugins/modules/pagerduty_change.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/pagerduty_change.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_user.py b/ansible_collections/community/general/plugins/modules/pagerduty_user.py deleted file mode 120000 index 0c79c81b..00000000 --- a/ansible_collections/community/general/plugins/modules/pagerduty_user.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/pagerduty_user.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pam_limits.py b/ansible_collections/community/general/plugins/modules/pam_limits.py deleted file mode 120000 index 29ee5433..00000000 --- a/ansible_collections/community/general/plugins/modules/pam_limits.py +++ /dev/null @@ -1 +0,0 @@ -system/pam_limits.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pamd.py b/ansible_collections/community/general/plugins/modules/pamd.py deleted file mode 120000 index 45f5549c..00000000 --- a/ansible_collections/community/general/plugins/modules/pamd.py +++ /dev/null @@ -1 +0,0 @@ -system/pamd.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/parted.py b/ansible_collections/community/general/plugins/modules/parted.py deleted file mode 120000 index f10251ed..00000000 --- a/ansible_collections/community/general/plugins/modules/parted.py +++ /dev/null @@ -1 +0,0 @@ -system/parted.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pear.py b/ansible_collections/community/general/plugins/modules/pear.py deleted file mode 120000 index 41a5f17b..00000000 --- a/ansible_collections/community/general/plugins/modules/pear.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/pear.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pids.py b/ansible_collections/community/general/plugins/modules/pids.py deleted file mode 120000 index dcd9d270..00000000 --- a/ansible_collections/community/general/plugins/modules/pids.py +++ /dev/null @@ -1 +0,0 @@ -system/pids.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pingdom.py b/ansible_collections/community/general/plugins/modules/pingdom.py deleted file mode 120000 index 0cb8a96f..00000000 --- a/ansible_collections/community/general/plugins/modules/pingdom.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/pingdom.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pip_package_info.py b/ansible_collections/community/general/plugins/modules/pip_package_info.py deleted file mode 120000 index 5b41b6bb..00000000 --- a/ansible_collections/community/general/plugins/modules/pip_package_info.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/pip_package_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pipx.py b/ansible_collections/community/general/plugins/modules/pipx.py deleted file mode 120000 index 7377ae0f..00000000 --- a/ansible_collections/community/general/plugins/modules/pipx.py +++ /dev/null @@ -1 +0,0 @@ -packaging/language/pipx.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pkg5.py b/ansible_collections/community/general/plugins/modules/pkg5.py deleted file mode 120000 index f10c473b..00000000 --- 
a/ansible_collections/community/general/plugins/modules/pkg5.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pkg5.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pkg5_publisher.py b/ansible_collections/community/general/plugins/modules/pkg5_publisher.py deleted file mode 120000 index 4388a902..00000000 --- a/ansible_collections/community/general/plugins/modules/pkg5_publisher.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pkg5_publisher.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pkgin.py b/ansible_collections/community/general/plugins/modules/pkgin.py deleted file mode 120000 index 77629c68..00000000 --- a/ansible_collections/community/general/plugins/modules/pkgin.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pkgin.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pkgng.py b/ansible_collections/community/general/plugins/modules/pkgng.py deleted file mode 120000 index 7ba7986f..00000000 --- a/ansible_collections/community/general/plugins/modules/pkgng.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pkgng.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pkgutil.py b/ansible_collections/community/general/plugins/modules/pkgutil.py deleted file mode 120000 index cc077881..00000000 --- a/ansible_collections/community/general/plugins/modules/pkgutil.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pkgutil.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pmem.py b/ansible_collections/community/general/plugins/modules/pmem.py deleted file mode 120000 index 6ae8a6a7..00000000 --- a/ansible_collections/community/general/plugins/modules/pmem.py +++ /dev/null @@ -1 +0,0 @@ -storage/pmem/pmem.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/portage.py b/ansible_collections/community/general/plugins/modules/portage.py deleted file mode 120000 index df8ec172..00000000 --- a/ansible_collections/community/general/plugins/modules/portage.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/portage.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/portinstall.py b/ansible_collections/community/general/plugins/modules/portinstall.py deleted file mode 120000 index 94d57abf..00000000 --- a/ansible_collections/community/general/plugins/modules/portinstall.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/portinstall.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pritunl_org.py b/ansible_collections/community/general/plugins/modules/pritunl_org.py deleted file mode 120000 index c022be56..00000000 --- a/ansible_collections/community/general/plugins/modules/pritunl_org.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/pritunl/pritunl_org.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pritunl_org_info.py b/ansible_collections/community/general/plugins/modules/pritunl_org_info.py deleted file mode 120000 index e76d52ae..00000000 --- a/ansible_collections/community/general/plugins/modules/pritunl_org_info.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/pritunl/pritunl_org_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pritunl_user.py b/ansible_collections/community/general/plugins/modules/pritunl_user.py deleted file mode 120000 index 42f3e7c7..00000000 --- 
a/ansible_collections/community/general/plugins/modules/pritunl_user.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/pritunl/pritunl_user.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pritunl_user_info.py b/ansible_collections/community/general/plugins/modules/pritunl_user_info.py deleted file mode 120000 index bfabbe0c..00000000 --- a/ansible_collections/community/general/plugins/modules/pritunl_user_info.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/pritunl/pritunl_user_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/profitbricks.py b/ansible_collections/community/general/plugins/modules/profitbricks.py deleted file mode 120000 index 1ff66519..00000000 --- a/ansible_collections/community/general/plugins/modules/profitbricks.py +++ /dev/null @@ -1 +0,0 @@ -cloud/profitbricks/profitbricks.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py b/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py deleted file mode 120000 index 9a16ce59..00000000 --- a/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py +++ /dev/null @@ -1 +0,0 @@ -cloud/profitbricks/profitbricks_datacenter.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_nic.py b/ansible_collections/community/general/plugins/modules/profitbricks_nic.py deleted file mode 120000 index 7c7c04e5..00000000 --- a/ansible_collections/community/general/plugins/modules/profitbricks_nic.py +++ /dev/null @@ -1 +0,0 @@ -cloud/profitbricks/profitbricks_nic.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_volume.py b/ansible_collections/community/general/plugins/modules/profitbricks_volume.py deleted file mode 120000 index 59cebed5..00000000 --- a/ansible_collections/community/general/plugins/modules/profitbricks_volume.py +++ /dev/null @@ -1 +0,0 @@ -cloud/profitbricks/profitbricks_volume.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py b/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py deleted file mode 120000 index 4160fa3e..00000000 --- a/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py +++ /dev/null @@ -1 +0,0 @@ -cloud/profitbricks/profitbricks_volume_attachments.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox.py b/ansible_collections/community/general/plugins/modules/proxmox.py deleted file mode 120000 index c472ad2d..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py b/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py deleted file mode 120000 index a14a61a3..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_domain_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_group_info.py b/ansible_collections/community/general/plugins/modules/proxmox_group_info.py deleted file mode 120000 index f9b76074..00000000 --- 
a/ansible_collections/community/general/plugins/modules/proxmox_group_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_group_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_kvm.py b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py deleted file mode 120000 index 2a066310..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox_kvm.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_kvm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_nic.py b/ansible_collections/community/general/plugins/modules/proxmox_nic.py deleted file mode 120000 index 88756ab6..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox_nic.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_nic.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_snap.py b/ansible_collections/community/general/plugins/modules/proxmox_snap.py deleted file mode 120000 index b4b80c78..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox_snap.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_snap.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py deleted file mode 120000 index 81283005..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_storage_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py deleted file mode 120000 index 34343b85..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_tasks_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_template.py b/ansible_collections/community/general/plugins/modules/proxmox_template.py deleted file mode 120000 index ef47a032..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox_template.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_template.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/proxmox_user_info.py b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py deleted file mode 120000 index a713ac8d..00000000 --- a/ansible_collections/community/general/plugins/modules/proxmox_user_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/proxmox_user_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pubnub_blocks.py b/ansible_collections/community/general/plugins/modules/pubnub_blocks.py deleted file mode 120000 index 5ccf0140..00000000 --- a/ansible_collections/community/general/plugins/modules/pubnub_blocks.py +++ /dev/null @@ -1 +0,0 @@ -cloud/pubnub/pubnub_blocks.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pulp_repo.py b/ansible_collections/community/general/plugins/modules/pulp_repo.py deleted file mode 120000 index 41558a47..00000000 --- a/ansible_collections/community/general/plugins/modules/pulp_repo.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/pulp_repo.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/puppet.py b/ansible_collections/community/general/plugins/modules/puppet.py deleted file mode 120000 index 7649c6aa..00000000 --- a/ansible_collections/community/general/plugins/modules/puppet.py +++ /dev/null @@ -1 +0,0 @@ -system/puppet.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pushbullet.py b/ansible_collections/community/general/plugins/modules/pushbullet.py deleted file mode 120000 index 4a20877c..00000000 --- a/ansible_collections/community/general/plugins/modules/pushbullet.py +++ /dev/null @@ -1 +0,0 @@ -notification/pushbullet.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/pushover.py b/ansible_collections/community/general/plugins/modules/pushover.py deleted file mode 120000 index c3532720..00000000 --- a/ansible_collections/community/general/plugins/modules/pushover.py +++ /dev/null @@ -1 +0,0 @@ -notification/pushover.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/python_requirements_info.py b/ansible_collections/community/general/plugins/modules/python_requirements_info.py deleted file mode 120000 index da184b89..00000000 --- a/ansible_collections/community/general/plugins/modules/python_requirements_info.py +++ /dev/null @@ -1 +0,0 @@ -system/python_requirements_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax.py b/ansible_collections/community/general/plugins/modules/rax.py deleted file mode 120000 index abb3907e..00000000 --- a/ansible_collections/community/general/plugins/modules/rax.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs.py b/ansible_collections/community/general/plugins/modules/rax_cbs.py deleted file mode 120000 index a6d26016..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_cbs.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_cbs.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py b/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py deleted file mode 120000 index 6e3ba780..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_cbs_attachments.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb.py b/ansible_collections/community/general/plugins/modules/rax_cdb.py deleted file mode 120000 index 922414a0..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_cdb.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_cdb.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_database.py b/ansible_collections/community/general/plugins/modules/rax_cdb_database.py deleted file mode 120000 index a4e92ec8..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_cdb_database.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_cdb_database.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_user.py b/ansible_collections/community/general/plugins/modules/rax_cdb_user.py deleted file mode 120000 index 9f7d7e54..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_cdb_user.py +++ 
/dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_cdb_user.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_clb.py b/ansible_collections/community/general/plugins/modules/rax_clb.py deleted file mode 120000 index b883f876..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_clb.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_clb.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py b/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py deleted file mode 120000 index e4f2a4d8..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_clb_nodes.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py b/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py deleted file mode 120000 index e33aceee..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_clb_ssl.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_dns.py b/ansible_collections/community/general/plugins/modules/rax_dns.py deleted file mode 120000 index f27fb1fd..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_dns.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_dns.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_dns_record.py b/ansible_collections/community/general/plugins/modules/rax_dns_record.py deleted file mode 120000 index 74a776c6..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_dns_record.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_dns_record.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_facts.py b/ansible_collections/community/general/plugins/modules/rax_facts.py deleted file mode 120000 index 2d0e4dbc..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_facts.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_files.py b/ansible_collections/community/general/plugins/modules/rax_files.py deleted file mode 120000 index 58064fe3..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_files.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_files.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_files_objects.py b/ansible_collections/community/general/plugins/modules/rax_files_objects.py deleted file mode 120000 index d76ac352..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_files_objects.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_files_objects.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_identity.py b/ansible_collections/community/general/plugins/modules/rax_identity.py deleted file mode 120000 index 05ba70c9..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_identity.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_identity.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_keypair.py b/ansible_collections/community/general/plugins/modules/rax_keypair.py deleted file mode 
120000 index 3f4cca2e..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_keypair.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_keypair.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_meta.py b/ansible_collections/community/general/plugins/modules/rax_meta.py deleted file mode 120000 index 9774648c..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_meta.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_meta.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py b/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py deleted file mode 120000 index 3e76e256..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_mon_alarm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_check.py b/ansible_collections/community/general/plugins/modules/rax_mon_check.py deleted file mode 120000 index 948a2f1a..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_mon_check.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_mon_check.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_entity.py b/ansible_collections/community/general/plugins/modules/rax_mon_entity.py deleted file mode 120000 index 09af28c2..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_mon_entity.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_mon_entity.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification.py deleted file mode 120000 index 44b23249..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_mon_notification.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_mon_notification.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py deleted file mode 120000 index 9d3b95a3..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_mon_notification_plan.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_network.py b/ansible_collections/community/general/plugins/modules/rax_network.py deleted file mode 120000 index 00b16ee1..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_network.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_network.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_queue.py b/ansible_collections/community/general/plugins/modules/rax_queue.py deleted file mode 120000 index 2a8c94d5..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_queue.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_queue.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_group.py b/ansible_collections/community/general/plugins/modules/rax_scaling_group.py deleted file mode 120000 index 6a73fc0b..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_scaling_group.py +++ /dev/null @@ -1 +0,0 @@ 
-cloud/rackspace/rax_scaling_group.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py b/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py deleted file mode 120000 index 5c7e365e..00000000 --- a/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py +++ /dev/null @@ -1 +0,0 @@ -cloud/rackspace/rax_scaling_policy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/read_csv.py b/ansible_collections/community/general/plugins/modules/read_csv.py deleted file mode 120000 index 6cb7d4e8..00000000 --- a/ansible_collections/community/general/plugins/modules/read_csv.py +++ /dev/null @@ -1 +0,0 @@ -files/read_csv.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redfish_command.py b/ansible_collections/community/general/plugins/modules/redfish_command.py deleted file mode 120000 index 538d0c8e..00000000 --- a/ansible_collections/community/general/plugins/modules/redfish_command.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/redfish_command.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redfish_config.py b/ansible_collections/community/general/plugins/modules/redfish_config.py deleted file mode 120000 index 7f5d0480..00000000 --- a/ansible_collections/community/general/plugins/modules/redfish_config.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/redfish_config.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redfish_info.py b/ansible_collections/community/general/plugins/modules/redfish_info.py deleted file mode 120000 index b9ae79e6..00000000 --- a/ansible_collections/community/general/plugins/modules/redfish_info.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/redfish_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redhat_subscription.py b/ansible_collections/community/general/plugins/modules/redhat_subscription.py deleted file mode 120000 index 12e0c46b..00000000 --- a/ansible_collections/community/general/plugins/modules/redhat_subscription.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/redhat_subscription.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redis.py b/ansible_collections/community/general/plugins/modules/redis.py deleted file mode 120000 index ea0eedfc..00000000 --- a/ansible_collections/community/general/plugins/modules/redis.py +++ /dev/null @@ -1 +0,0 @@ -database/misc/redis.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redis_data.py b/ansible_collections/community/general/plugins/modules/redis_data.py deleted file mode 120000 index fd32277c..00000000 --- a/ansible_collections/community/general/plugins/modules/redis_data.py +++ /dev/null @@ -1 +0,0 @@ -database/misc/redis_data.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redis_data_incr.py b/ansible_collections/community/general/plugins/modules/redis_data_incr.py deleted file mode 120000 index c5864fa9..00000000 --- a/ansible_collections/community/general/plugins/modules/redis_data_incr.py +++ /dev/null @@ -1 +0,0 @@ -database/misc/redis_data_incr.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redis_data_info.py 
b/ansible_collections/community/general/plugins/modules/redis_data_info.py deleted file mode 120000 index 14c54fb2..00000000 --- a/ansible_collections/community/general/plugins/modules/redis_data_info.py +++ /dev/null @@ -1 +0,0 @@ -database/misc/redis_data_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/redis_info.py b/ansible_collections/community/general/plugins/modules/redis_info.py deleted file mode 120000 index f9c39b25..00000000 --- a/ansible_collections/community/general/plugins/modules/redis_info.py +++ /dev/null @@ -1 +0,0 @@ -database/misc/redis_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py b/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py deleted file mode 100644 index 15720821..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Dag Wieers (dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: cobbler_sync -short_description: Sync Cobbler -description: -- Sync Cobbler to commit changes. -options: - host: - description: - - The name or IP address of the Cobbler system. - default: 127.0.0.1 - type: str - port: - description: - - Port number to be used for REST connection. - - The default value depends on parameter C(use_ssl). - type: int - username: - description: - - The username to log in to Cobbler. - default: cobbler - type: str - password: - description: - - The password to log in to Cobbler. - type: str - use_ssl: - description: - - If C(no), an HTTP connection will be used instead of the default HTTPS connection. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. - - This should only set to C(no) when used on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -author: -- Dag Wieers (@dagwieers) -todo: -notes: -- Concurrently syncing Cobbler is bound to fail with weird errors. -- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. - More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). 
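# Not part of the diff: a minimal sketch of the XML-RPC session that
# cobbler_sync drives, assuming a reachable Cobbler endpoint. Host and
# credentials are illustrative (borrowed from the module examples), and
# the Python 3 stdlib client stands in for the module's six wrapper.
import ssl
from xmlrpc.client import ServerProxy

url = 'https://cobbler01:443/cobbler_api'
conn = ServerProxy(url, context=ssl._create_unverified_context())  # skip cert checks, per the note above
token = conn.login('cobbler', 'MySuperSecureP4sswOrd')  # returns a session token
conn.sync(token)  # commit pending Cobbler changes; avoid running this concurrently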
-''' - -EXAMPLES = r''' -- name: Commit Cobbler changes - community.general.cobbler_sync: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - run_once: yes - delegate_to: localhost -''' - -RETURN = r''' -# Default return values -''' - -import datetime -import ssl - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client -from ansible.module_utils.common.text.converters import to_text - - -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', default='127.0.0.1'), - port=dict(type='int'), - username=dict(type='str', default='cobbler'), - password=dict(type='str', no_log=True), - use_ssl=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), - ), - supports_check_mode=True, - ) - - username = module.params['username'] - password = module.params['password'] - port = module.params['port'] - use_ssl = module.params['use_ssl'] - validate_certs = module.params['validate_certs'] - - module.params['proto'] = 'https' if use_ssl else 'http' - if not port: - module.params['port'] = '443' if use_ssl else '80' - - result = dict( - changed=True, - ) - - start = datetime.datetime.utcnow() - - ssl_context = None - if not validate_certs: - try: - ssl_context = ssl._create_unverified_context() - except AttributeError: - # Legacy Python that doesn't verify HTTPS certificates by default - pass - else: - # Handle target environment that doesn't support HTTPS verification - ssl._create_default_https_context = ssl._create_unverified_context - - url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) - if ssl_context: - conn = xmlrpc_client.ServerProxy(url, context=ssl_context) - else: - conn = xmlrpc_client.Server(url) - - try: - token = conn.login(username, password) - except xmlrpc_client.Fault as e: - module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) - except Exception as e: - module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e))) - - if not module.check_mode: - try: - conn.sync(token) - except Exception as e: - module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e))) - - elapsed = datetime.datetime.utcnow() - start - module.exit_json(elapsed=elapsed.seconds, **result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py b/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py deleted file mode 100644 index e97be012..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Dag Wieers (dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: cobbler_system -short_description: Manage system objects in Cobbler -description: -- Add, modify or remove systems in Cobbler -options: - host: - description: - - The name or IP address of the Cobbler system. - default: 127.0.0.1 - type: str - port: - description: - - Port number to be used for REST connection. - - The default value depends on parameter C(use_ssl). 
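# Not part of the diff: continuing the XML-RPC sketch above, these are the
# Cobbler API calls cobbler_system is built around; the system name and
# profile value are illustrative, taken from the examples further down.
systems = conn.find_system(dict(name='myhost'), token)  # empty list if absent
handle = conn.get_system_handle('myhost', token)  # handle for later updates
conn.modify_system(handle, 'profile', 'CentOS6-x86_64', token)
conn.save_system(handle, token)  # persist only once all modifications are queued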
- type: int - username: - description: - - The username to log in to Cobbler. - default: cobbler - type: str - password: - description: - - The password to log in to Cobbler. - type: str - use_ssl: - description: - - If C(no), an HTTP connection will be used instead of the default HTTPS connection. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. - - This should only set to C(no) when used on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - name: - description: - - The system name to manage. - type: str - properties: - description: - - A dictionary with system properties. - type: dict - interfaces: - description: - - A list of dictionaries containing interface options. - type: dict - sync: - description: - - Sync on changes. - - Concurrently syncing Cobbler is bound to fail. - type: bool - default: no - state: - description: - - Whether the system should be present, absent or a query is made. - choices: [ absent, present, query ] - default: present - type: str -author: -- Dag Wieers (@dagwieers) -notes: -- Concurrently syncing Cobbler is bound to fail with weird errors. -- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. - More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). -''' - -EXAMPLES = r''' -- name: Ensure the system exists in Cobbler - community.general.cobbler_system: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - name: myhost - properties: - profile: CentOS6-x86_64 - name_servers: [ 2.3.4.5, 3.4.5.6 ] - name_servers_search: foo.com, bar.com - interfaces: - eth0: - macaddress: 00:01:02:03:04:05 - ipaddress: 1.2.3.4 - delegate_to: localhost - -- name: Enable network boot in Cobbler - community.general.cobbler_system: - host: bdsol-aci-cobbler-01 - username: cobbler - password: ins3965! 
- name: bdsol-aci51-apic1.cisco.com - properties: - netboot_enabled: yes - state: present - delegate_to: localhost - -- name: Query all systems in Cobbler - community.general.cobbler_system: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - state: query - register: cobbler_systems - delegate_to: localhost - -- name: Query a specific system in Cobbler - community.general.cobbler_system: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - name: '{{ inventory_hostname }}' - state: query - register: cobbler_properties - delegate_to: localhost - -- name: Ensure the system does not exist in Cobbler - community.general.cobbler_system: - host: cobbler01 - username: cobbler - password: MySuperSecureP4sswOrd - name: myhost - state: absent - delegate_to: localhost -''' - -RETURN = r''' -systems: - description: List of systems - returned: C(state=query) and C(name) is not provided - type: list -system: - description: (Resulting) information about the system we are working with - returned: when C(name) is provided - type: dict -''' - -import copy -import datetime -import ssl - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import xmlrpc_client -from ansible.module_utils.common.text.converters import to_text - -IFPROPS_MAPPING = dict( - bondingopts='bonding_opts', - bridgeopts='bridge_opts', - connected_mode='connected_mode', - cnames='cnames', - dhcptag='dhcp_tag', - dnsname='dns_name', - ifgateway='if_gateway', - interfacetype='interface_type', - interfacemaster='interface_master', - ipaddress='ip_address', - ipv6address='ipv6_address', - ipv6defaultgateway='ipv6_default_gateway', - ipv6mtu='ipv6_mtu', - ipv6prefix='ipv6_prefix', - ipv6secondaries='ipv6_secondariesu', - ipv6staticroutes='ipv6_static_routes', - macaddress='mac_address', - management='management', - mtu='mtu', - netmask='netmask', - static='static', - staticroutes='static_routes', - virtbridge='virt_bridge', -) - - -def getsystem(conn, name, token): - system = dict() - if name: - # system = conn.get_system(name, token) - systems = conn.find_system(dict(name=name), token) - if systems: - system = systems[0] - return system - - -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', default='127.0.0.1'), - port=dict(type='int'), - username=dict(type='str', default='cobbler'), - password=dict(type='str', no_log=True), - use_ssl=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), - name=dict(type='str'), - interfaces=dict(type='dict'), - properties=dict(type='dict'), - sync=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present', 'query']), - ), - supports_check_mode=True, - ) - - username = module.params['username'] - password = module.params['password'] - port = module.params['port'] - use_ssl = module.params['use_ssl'] - validate_certs = module.params['validate_certs'] - - name = module.params['name'] - state = module.params['state'] - - module.params['proto'] = 'https' if use_ssl else 'http' - if not port: - module.params['port'] = '443' if use_ssl else '80' - - result = dict( - changed=False, - ) - - start = datetime.datetime.utcnow() - - ssl_context = None - if not validate_certs: - try: - ssl_context = ssl._create_unverified_context() - except AttributeError: - # Legacy Python that doesn't verify HTTPS certificates by default - pass - else: - # Handle target environment that doesn't 
support HTTPS verification - ssl._create_default_https_context = ssl._create_unverified_context - - url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) - if ssl_context: - conn = xmlrpc_client.ServerProxy(url, context=ssl_context) - else: - conn = xmlrpc_client.Server(url) - - try: - token = conn.login(username, password) - except xmlrpc_client.Fault as e: - module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) - except Exception as e: - module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params)) - - system = getsystem(conn, name, token) - # result['system'] = system - - if state == 'query': - if name: - result['system'] = system - else: - # Turn it into a dictionary of dictionaries - # all_systems = conn.get_systems() - # result['systems'] = { system['name']: system for system in all_systems } - - # Return a list of dictionaries - result['systems'] = conn.get_systems() - - elif state == 'present': - - if system: - # Update existing entry - system_id = conn.get_system_handle(name, token) - - for key, value in iteritems(module.params['properties']): - if key not in system: - module.warn("Property '{0}' is not a valid system property.".format(key)) - if system[key] != value: - try: - conn.modify_system(system_id, key, value, token) - result['changed'] = True - except Exception as e: - module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) - - else: - # Create a new entry - system_id = conn.new_system(token) - conn.modify_system(system_id, 'name', name, token) - result['changed'] = True - - if module.params['properties']: - for key, value in iteritems(module.params['properties']): - try: - conn.modify_system(system_id, key, value, token) - except Exception as e: - module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) - - # Add interface properties - interface_properties = dict() - if module.params['interfaces']: - for device, values in iteritems(module.params['interfaces']): - for key, value in iteritems(values): - if key == 'name': - continue - if key not in IFPROPS_MAPPING: - module.warn("Property '{0}' is not a valid system property.".format(key)) - if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value: - result['changed'] = True - interface_properties['{0}-{1}'.format(key, device)] = value - - if result['changed'] is True: - conn.modify_system(system_id, "modify_interface", interface_properties, token) - - # Only save when the entry was changed - if not module.check_mode and result['changed']: - conn.save_system(system_id, token) - - elif state == 'absent': - - if system: - if not module.check_mode: - conn.remove_system(name, token) - result['changed'] = True - - if not module.check_mode and module.params['sync'] and result['changed']: - try: - conn.sync(token) - except Exception as e: - module.fail_json(msg="Failed to sync Cobbler. 
{0}".format(to_text(e))) - - if state in ('absent', 'present'): - result['system'] = getsystem(conn, name, token) - - if module._diff: - result['diff'] = dict(before=system, after=result['system']) - - elapsed = datetime.datetime.utcnow() - start - module.exit_json(elapsed=elapsed.seconds, **result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py b/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py deleted file mode 100644 index b685e96b..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: imc_rest -short_description: Manage Cisco IMC hardware through its REST API -description: -- Provides direct access to the Cisco IMC REST API. -- Perform any configuration changes and actions that the Cisco IMC supports. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) -author: -- Dag Wieers (@dagwieers) -requirements: -- lxml -- xmljson >= 0.1.8 -options: - hostname: - description: - - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. - required: true - aliases: [ host, ip ] - type: str - username: - description: - - Username used to login to the switch. - default: admin - aliases: [ user ] - type: str - password: - description: - - The password to use for authentication. - default: password - type: str - path: - description: - - Name of the absolute path of the filename that includes the body - of the http request being sent to the Cisco IMC REST API. - - Parameter C(path) is mutual exclusive with parameter C(content). - aliases: [ 'src', 'config_file' ] - type: path - content: - description: - - When used instead of C(path), sets the content of the API requests directly. - - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. - - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, - the Cisco IMC output is subsequently merged. - - Parameter C(content) is mutual exclusive with parameter C(path). - type: str - protocol: - description: - - Connection protocol to use. - default: https - choices: [ http, https ] - type: str - timeout: - description: - - The socket level timeout in seconds. - - This is the time that every single connection (every fragment) can spend. - If this C(timeout) is reached, the module will fail with a - C(Connection failure) indicating that C(The read operation timed out). - default: 60 - type: int - validate_certs: - description: - - If C(no), SSL certificates will not be validated. - - This should only set to C(no) used on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -notes: -- The XML fragments don't need an authentication cookie, this is injected by the module automatically. -- The Cisco IMC XML output is being translated to JSON using the Cobra convention. 
-- Any configConfMo change requested has a return status of 'modified', even if there was no actual change - from the previous configuration. As a result, this module will always report a change on subsequent runs. - In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt. -- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout) - parameter. Some XML fragments can take longer than the default timeout. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) -''' - -EXAMPLES = r''' -- name: Power down server - community.general.imc_rest: - hostname: '{{ imc_hostname }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - content: | - - - - delegate_to: localhost - -- name: Configure IMC using multiple XML fragments - community.general.imc_rest: - hostname: '{{ imc_hostname }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - timeout: 120 - content: | - - - - - - - - - - delegate_to: localhost - -- name: Enable PXE boot and power-cycle server - community.general.imc_rest: - hostname: '{{ imc_hostname }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - content: | - - - - - - - - - - delegate_to: localhost - -- name: Reconfigure IMC to boot from storage - community.general.imc_rest: - hostname: '{{ imc_host }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - content: | - - - - delegate_to: localhost - -- name: Add customer description to server - community.general.imc_rest: - hostname: '{{ imc_host }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - content: | - - - - delegate_to: localhost - -- name: Disable HTTP and increase session timeout to max value 10800 secs - community.general.imc_rest: - hostname: '{{ imc_host }}' - username: '{{ imc_username }}' - password: '{{ imc_password }}' - validate_certs: no - timeout: 120 - content: | - - - - - - - - delegate_to: localhost -''' - -RETURN = r''' -aaLogin: - description: Cisco IMC XML output for the login, translated to JSON using Cobra convention - returned: success - type: dict - sample: | - "attributes": { - "cookie": "", - "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a", - "outPriv": "admin", - "outRefreshPeriod": "600", - "outSessionId": "114", - "outVersion": "2.0(13e)", - "response": "yes" - } -configConfMo: - description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention - returned: success - type: dict - sample: | -elapsed: - description: Elapsed time in seconds - returned: always - type: int - sample: 31 -response: - description: HTTP response message, including content length - returned: always - type: str - sample: OK (729 bytes) -status: - description: The HTTP response status code - returned: always - type: dict - sample: 200 -error: - description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention - returned: failed - type: dict - sample: | - "attributes": { - "cookie": "", - "errorCode": "ERR-xml-parse-error", - "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. 
", - "invocationResult": "594", - "response": "yes" - } -error_code: - description: Cisco IMC error code - returned: failed - type: str - sample: ERR-xml-parse-error -error_text: - description: Cisco IMC error message - returned: failed - type: str - sample: | - XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. -input: - description: RAW XML input sent to the Cisco IMC, causing the error - returned: failed - type: str - sample: | - -output: - description: RAW XML output received from the Cisco IMC, with error details - returned: failed - type: str - sample: > - -''' - -import datetime -import os -import traceback -from functools import partial - -LXML_ETREE_IMP_ERR = None -try: - import lxml.etree - HAS_LXML_ETREE = True -except ImportError: - LXML_ETREE_IMP_ERR = traceback.format_exc() - HAS_LXML_ETREE = False - -XMLJSON_COBRA_IMP_ERR = None -try: - from xmljson import cobra - HAS_XMLJSON_COBRA = True -except ImportError: - XMLJSON_COBRA_IMP_ERR = traceback.format_exc() - HAS_XMLJSON_COBRA = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves import zip_longest -from ansible.module_utils.urls import fetch_url - - -def imc_response(module, rawoutput, rawinput=''): - ''' Handle IMC returned data ''' - xmloutput = lxml.etree.fromstring(rawoutput) - result = cobra.data(xmloutput) - - # Handle errors - if xmloutput.get('errorCode') and xmloutput.get('errorDescr'): - if rawinput: - result['input'] = rawinput - result['output'] = rawoutput - result['error_code'] = xmloutput.get('errorCode') - result['error_text'] = xmloutput.get('errorDescr') - module.fail_json(msg='Request failed: %(error_text)s' % result, **result) - - return result - - -def logout(module, url, cookie, timeout): - ''' Perform a logout, if needed ''' - data = '' % (cookie, cookie) - resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout) - - -def merge(one, two): - ''' Merge two complex nested datastructures into one''' - if isinstance(one, dict) and isinstance(two, dict): - copy = dict(one) - # copy.update({key: merge(one.get(key, None), two[key]) for key in two}) - copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two)) - return copy - - elif isinstance(one, list) and isinstance(two, list): - return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)] - - return one if two is None else two - - -def main(): - module = AnsibleModule( - argument_spec=dict( - hostname=dict(type='str', required=True, aliases=['host', 'ip']), - username=dict(type='str', default='admin', aliases=['user']), - password=dict(type='str', default='password', no_log=True), - content=dict(type='str'), - path=dict(type='path', aliases=['config_file', 'src']), - protocol=dict(type='str', default='https', choices=['http', 'https']), - timeout=dict(type='int', default=60), - validate_certs=dict(type='bool', default=True), - ), - supports_check_mode=True, - mutually_exclusive=[['content', 'path']], - ) - - if not HAS_LXML_ETREE: - module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) - - if not HAS_XMLJSON_COBRA: - module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR) - - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - - content = module.params['content'] - path = module.params['path'] - - protocol = module.params['protocol'] - timeout = 
module.params['timeout'] - - result = dict( - failed=False, - changed=False, - ) - - # Report missing file - file_exists = False - if path: - if os.path.isfile(path): - file_exists = True - else: - module.fail_json(msg='Cannot find/access path:\n%s' % path) - - start = datetime.datetime.utcnow() - - # Perform login first - url = '%s://%s/nuova' % (protocol, hostname) - data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password) - resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout) - if resp is None or auth['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds - module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result) - result.update(imc_response(module, resp.read())) - - # Store cookie for future requests - cookie = '' - try: - cookie = result['aaaLogin']['attributes']['outCookie'] - except Exception: - module.fail_json(msg='Could not find cookie in output', **result) - - try: - # Prepare request data - if content: - rawdata = content - elif file_exists: - with open(path, 'r') as config_object: - rawdata = config_object.read() - - # Wrap the XML documents in a <root> element - xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', '')) - - # Handle each XML document separately in the same session - for xmldoc in list(xmldata): - if xmldoc.tag is lxml.etree.Comment: - continue - # Add cookie to XML - xmldoc.set('cookie', cookie) - data = lxml.etree.tostring(xmldoc) - - # Perform actual request - resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout) - if resp is None or info['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds - module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result) - - # Merge results with previous results - rawoutput = resp.read() - result = merge(result, imc_response(module, rawoutput, rawinput=data)) - result['response'] = info['msg'] - result['status'] = info['status'] - - # Check for any changes - # NOTE: Unfortunately IMC API always report status as 'modified' - xmloutput = lxml.etree.fromstring(rawoutput) - results = xmloutput.xpath('/configConfMo/outConfig/*/@status') - result['changed'] = ('modified' in results) - - # Report success - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds - module.exit_json(**result) - finally: - logout(module, url, cookie, timeout) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py b/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py deleted file mode 100644 index f8cff0e7..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ipmi_boot -short_description: Management of order of boot devices -description: - - Use this module to manage order of boot devices -options: - name: - description: - - Hostname or ip address of the BMC. - required: true - type: str - port: - description: - - Remote RMCP port. - default: 623 - type: int - user: - description: - - Username to use to connect to the BMC.
- required: true - type: str - password: - description: - - Password to connect to the BMC. - required: true - type: str - key: - description: - - Encryption key to connect to the BMC in hex format. - required: false - type: str - version_added: 4.1.0 - bootdev: - description: - - Set boot device to use on next reboot - - "The choices for the device are: - - network -- Request network boot - - floppy -- Boot from floppy - - hd -- Boot from hard drive - - safe -- Boot from hard drive, requesting 'safe mode' - - optical -- boot from CD/DVD/BD drive - - setup -- Boot into setup utility - - default -- remove any IPMI directed boot device request" - required: true - choices: - - network - - floppy - - hd - - safe - - optical - - setup - - default - type: str - state: - description: - - Whether to ensure that boot devices is desired. - - "The choices for the state are: - - present -- Request system turn on - - absent -- Request system turn on" - default: present - choices: [ present, absent ] - type: str - persistent: - description: - - If set, ask that system firmware uses this device beyond next boot. - Be aware many systems do not honor this. - type: bool - default: 'no' - uefiboot: - description: - - If set, request UEFI boot explicitly. - Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option. - In practice, this flag not being set does not preclude UEFI boot on any system I've encountered. - type: bool - default: 'no' -requirements: - - "python >= 2.6" - - pyghmi -author: "Bulat Gaifullin (@bgaifullin) " -''' - -RETURN = ''' -bootdev: - description: The boot device name which will be used beyond next boot. - returned: success - type: str - sample: default -persistent: - description: If True, system firmware will use this device beyond next boot. - returned: success - type: bool - sample: false -uefimode: - description: If True, system firmware will use UEFI boot explicitly beyond next boot. 
- returned: success - type: bool - sample: false -''' - -EXAMPLES = ''' -- name: Ensure bootdevice is HD - community.general.ipmi_boot: - name: test.testdomain.com - user: admin - password: password - bootdev: hd - -- name: Ensure bootdevice is not Network - community.general.ipmi_boot: - name: test.testdomain.com - user: admin - password: password - key: 1234567890AABBCCDEFF000000EEEE12 - bootdev: network - state: absent -''' - -import traceback -import binascii - -PYGHMI_IMP_ERR = None -try: - from pyghmi.ipmi import command -except ImportError: - PYGHMI_IMP_ERR = traceback.format_exc() - command = None - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - port=dict(default=623, type='int'), - user=dict(required=True, no_log=True), - password=dict(required=True, no_log=True), - key=dict(type='str', no_log=True), - state=dict(default='present', choices=['present', 'absent']), - bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']), - persistent=dict(default=False, type='bool'), - uefiboot=dict(default=False, type='bool') - ), - supports_check_mode=True, - ) - - if command is None: - module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) - - name = module.params['name'] - port = module.params['port'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - bootdev = module.params['bootdev'] - persistent = module.params['persistent'] - uefiboot = module.params['uefiboot'] - request = dict() - - if state == 'absent' and bootdev == 'default': - module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.") - - try: - if module.params['key']: - key = binascii.unhexlify(module.params['key']) - else: - key = None - except Exception as e: - module.fail_json(msg="Unable to convert 'key' from hex string.") - - # --- run command --- - try: - ipmi_cmd = command.Command( - bmc=name, userid=user, password=password, port=port, kg=key - ) - module.debug('ipmi instantiated - name: "%s"' % name) - current = ipmi_cmd.get_bootdev() - # uefimode may not supported by BMC, so use desired value as default - current.setdefault('uefimode', uefiboot) - if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): - request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent) - elif state == 'absent' and current['bootdev'] == bootdev: - request = dict(bootdev='default') - else: - module.exit_json(changed=False, **current) - - if module.check_mode: - response = dict(bootdev=request['bootdev']) - else: - response = ipmi_cmd.set_bootdev(**request) - - if 'error' in response: - module.fail_json(msg=response['error']) - - if 'persist' in request: - response['persistent'] = request['persist'] - if 'uefiboot' in request: - response['uefimode'] = request['uefiboot'] - - module.exit_json(changed=True, **response) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py b/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py deleted file mode 100644 index 9abf167f..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py +++ /dev/null @@ -1,269 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- 
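# Not part of the diff: ipmi_boot above and ipmi_power below wrap the same
# pyghmi session pattern, sketched here against a hypothetical BMC; host
# and credentials mirror the modules' own examples.
from pyghmi.ipmi import command

ipmi_cmd = command.Command(bmc='test.testdomain.com', userid='admin', password='password', port=623)
print(ipmi_cmd.get_bootdev())  # e.g. {'bootdev': 'default', 'persistent': False, 'uefimode': False}
ipmi_cmd.set_bootdev('network', persist=False, uefiboot=False)  # request PXE for next boot only
print(ipmi_cmd.get_power())  # e.g. {'powerstate': 'on'}
ipmi_cmd.set_power('off', wait=300)  # wait up to 300s for the transition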
-# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ipmi_power -short_description: Power management for machine -description: - - Use this module for power management -options: - name: - description: - - Hostname or ip address of the BMC. - required: true - type: str - port: - description: - - Remote RMCP port. - default: 623 - type: int - user: - description: - - Username to use to connect to the BMC. - required: true - type: str - password: - description: - - Password to connect to the BMC. - required: true - type: str - key: - description: - - Encryption key to connect to the BMC in hex format. - required: false - type: str - version_added: 4.1.0 - state: - description: - - Whether to ensure that the machine in desired state. - - "The choices for state are: - - on -- Request system turn on - - off -- Request system turn off without waiting for OS to shutdown - - shutdown -- Have system request OS proper shutdown - - reset -- Request system reset without waiting for OS - - boot -- If system is off, then 'on', else 'reset'" - - Either this option or I(machine) is required. - choices: ['on', 'off', shutdown, reset, boot] - type: str - timeout: - description: - - Maximum number of seconds before interrupt request. - default: 300 - type: int - machine: - description: - - Provide a list of the remote target address for the bridge IPMI request, - and the power status. - - Either this option or I(state) is required. - required: false - type: list - elements: dict - version_added: 4.3.0 - suboptions: - targetAddress: - description: - - Remote target address for the bridge IPMI request. - type: int - required: true - state: - description: - - Whether to ensure that the machine specified by I(targetAddress) in desired state. - - If this option is not set, the power state is set by I(state). - - If both this option and I(state) are set, this option takes precedence over I(state). - choices: ['on', 'off', shutdown, reset, boot] - type: str - -requirements: - - "python >= 2.6" - - pyghmi -author: "Bulat Gaifullin (@bgaifullin) " -''' - -RETURN = ''' -powerstate: - description: The current power state of the machine. - returned: success and I(machine) is not provided - type: str - sample: on -status: - description: The current power state of the machine when the machine option is set. - returned: success and I(machine) is provided - type: list - elements: dict - version_added: 4.3.0 - contains: - powerstate: - description: The current power state of the machine specified by I(targetAddress). - type: str - targetAddress: - description: The remote target address. 
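# Not part of the diff: continuing the pyghmi sketch above, the bridged
# form used for I(machine) entries; per the module's own comment this
# needs pyghmi >= 1.5.30, and the target address 48 is illustrative.
print(ipmi_cmd.get_power(bridge_request={'addr': 48}))  # e.g. {'powerstate': 'on'}
ipmi_cmd.set_power('on', wait=300, bridge_request={'addr': 48})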
- type: int - sample: [ - { - "powerstate": "on", - "targetAddress": 48, - }, - { - "powerstate": "on", - "targetAddress": 50, - }, - ] -''' - -EXAMPLES = ''' -- name: Ensure machine is powered on - community.general.ipmi_power: - name: test.testdomain.com - user: admin - password: password - state: on - -- name: Ensure machines of which remote target address is 48 and 50 are powered off - community.general.ipmi_power: - name: test.testdomain.com - user: admin - password: password - state: off - machine: - - targetAddress: 48 - - targetAddress: 50 - -- name: Ensure machine of which remote target address is 48 is powered on, and 50 is powered off - community.general.ipmi_power: - name: test.testdomain.com - user: admin - password: password - machine: - - targetAddress: 48 - state: on - - targetAddress: 50 - state: off -''' - -import traceback -import binascii - -PYGHMI_IMP_ERR = None -INVALID_TARGET_ADDRESS = 0x100 -try: - from pyghmi.ipmi import command -except ImportError: - PYGHMI_IMP_ERR = traceback.format_exc() - command = None - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - port=dict(default=623, type='int'), - state=dict(choices=['on', 'off', 'shutdown', 'reset', 'boot']), - user=dict(required=True, no_log=True), - password=dict(required=True, no_log=True), - key=dict(type='str', no_log=True), - timeout=dict(default=300, type='int'), - machine=dict( - type='list', elements='dict', - options=dict( - targetAddress=dict(required=True, type='int'), - state=dict(type='str', choices=['on', 'off', 'shutdown', 'reset', 'boot']), - ), - ), - ), - supports_check_mode=True, - required_one_of=( - ['state', 'machine'], - ), - ) - - if command is None: - module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) - - name = module.params['name'] - port = module.params['port'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - timeout = module.params['timeout'] - machine = module.params['machine'] - - try: - if module.params['key']: - key = binascii.unhexlify(module.params['key']) - else: - key = None - except Exception: - module.fail_json(msg="Unable to convert 'key' from hex string.") - - # --- run command --- - try: - ipmi_cmd = command.Command( - bmc=name, userid=user, password=password, port=port, kg=key - ) - module.debug('ipmi instantiated - name: "%s"' % name) - - changed = False - if machine is None: - current = ipmi_cmd.get_power() - if current['powerstate'] != state: - response = {'powerstate': state} if module.check_mode \ - else ipmi_cmd.set_power(state, wait=timeout) - changed = True - else: - response = current - - if 'error' in response: - module.fail_json(msg=response['error']) - - module.exit_json(changed=changed, **response) - else: - response = [] - for entry in machine: - taddr = entry['targetAddress'] - if taddr >= INVALID_TARGET_ADDRESS: - module.fail_json(msg="targetAddress should be set between 0 to 255.") - - try: - # bridge_request is supported on pyghmi 1.5.30 and later - current = ipmi_cmd.get_power(bridge_request={"addr": taddr}) - except TypeError: - module.fail_json( - msg="targetAddress isn't supported on the installed pyghmi.") - - if entry['state']: - tstate = entry['state'] - elif state: - tstate = state - else: - module.fail_json(msg="Either state or suboption of machine state should be set.") - - if current['powerstate'] != tstate: - changed = True - if not 
module.check_mode: - new = ipmi_cmd.set_power(tstate, wait=timeout, bridge_request={"addr": taddr}) - if 'error' in new: - module.fail_json(msg=new['error']) - - response.append( - {'targetAddress:': taddr, 'powerstate': new['powerstate']}) - - if current['powerstate'] == tstate or module.check_mode: - response.append({'targetAddress:': taddr, 'powerstate': tstate}) - - module.exit_json(changed=changed, status=response) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py b/ansible_collections/community/general/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py deleted file mode 100644 index f082f6cd..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py +++ /dev/null @@ -1,677 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: xcc_redfish_command -short_description: Manages Lenovo Out-Of-Band controllers using Redfish APIs -version_added: 2.4.0 -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action or get information back or update a configuration attribute. - - Manages virtual media. - - Supports getting information back via GET method. - - Supports updating a configuration attribute via PATCH method. - - Supports performing an action via POST method. -options: - category: - required: true - description: - - Category to execute on OOB controller. - type: str - command: - required: true - description: - - List of commands to execute on OOB controller. - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller. - type: str - username: - description: - - Username for authentication with OOB controller. - type: str - password: - description: - - Password for authentication with OOB controller. - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - timeout: - description: - - Timeout in seconds for URL requests to OOB controller. - default: 10 - type: int - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify. - type: str - virtual_media: - required: false - description: - - The options for VirtualMedia commands. - type: dict - suboptions: - media_types: - description: - - The list of media types appropriate for the image. - type: list - elements: str - image_url: - description: - - The URL of the image to insert or eject. - type: str - inserted: - description: - - Indicates if the image is treated as inserted on command completion. - type: bool - default: true - write_protected: - description: - - Indicates if the media is treated as write-protected. - type: bool - default: true - username: - description: - - The username for accessing the image URL. - type: str - password: - description: - - The password for accessing the image URL. - type: str - transfer_protocol_type: - description: - - The network protocol to use with the image. - type: str - transfer_method: - description: - - The transfer method to use with the image. - type: str - resource_uri: - required: false - description: - - The resource uri to get or patch or post. 
- type: str - request_body: - required: false - description: - - The request body to patch or post. - type: dict - -author: "Yuyan Pan (@panyy3)" -''' - -EXAMPLES = ''' - - name: Insert Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaInsert - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: "http://example.com/images/SomeLinux-current.iso" - media_types: - - CD - - DVD - resource_id: "1" - - - name: Eject Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: "http://example.com/images/SomeLinux-current.iso" - resource_id: "1" - - - name: Eject all Virtual Media - community.general.xcc_redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_id: "1" - - - name: Get ComputeSystem Oem property SystemStatus via GetResource command - community.general.xcc_redfish_command: - category: Raw - command: GetResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1" - register: result - - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data.Oem.Lenovo.SystemStatus }}" - - - name: Get Oem DNS setting via GetResource command - community.general.xcc_redfish_command: - category: Raw - command: GetResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data }}" - - - name: Get Lenovo FoD key collection resource via GetCollectionResource command - community.general.xcc_redfish_command: - category: Raw - command: GetCollectionResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.data_list }}" - - - name: Update ComputeSystem property AssetTag via PatchResource command - community.general.xcc_redfish_command: - category: Raw - command: PatchResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1" - request_body: - AssetTag: "new_asset_tag" - - - name: Perform BootToBIOSSetup action via PostResource command - community.general.xcc_redfish_command: - category: Raw - command: PostResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.BootToBIOSSetup" - request_body: {} - - - name: Perform SecureBoot.ResetKeys action via PostResource command - community.general.xcc_redfish_command: - category: Raw - command: PostResource - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - resource_uri: "/redfish/v1/Systems/1/SecureBoot/Actions/SecureBoot.ResetKeys" - request_body: - ResetKeysType: DeleteAllKeys - - - name: Create session - community.general.redfish_command: - category: Sessions - command: CreateSession - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - 
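  # The session registered by the task above feeds the next two tasks:
  # PatchResource authenticates with result.session.token, and DeleteSession
  # finally invalidates that token using result.session.uri, so it must run last.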
- - name: Update Manager DateTimeLocalOffset property using security token for auth - community.general.xcc_redfish_command: - category: Raw - command: PatchResource - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - resource_uri: "/redfish/v1/Managers/1" - request_body: - DateTimeLocalOffset: "+08:00" - - - name: Delete session using security token created by CreateSesssion above - community.general.redfish_command: - category: Sessions - command: DeleteSession - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - session_uri: "{{ result.session.uri }}" -''' - -RETURN = ''' -msg: - description: A message related to the performed action(s). - returned: when failure or action/update success - type: str - sample: "Action was successful" -redfish_facts: - description: Resource content. - returned: when command == GetResource or command == GetCollectionResource - type: dict - sample: '{ - "redfish_facts": { - "data": { - "@odata.etag": "\"3179bf00d69f25a8b3c\"", - "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS", - "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS", - "DDNS": [ - { - "DDNSEnable": true, - "DomainName": "", - "DomainNameSource": "DHCP" - } - ], - "DNSEnable": true, - "Description": "This resource is used to represent a DNS resource for a Redfish implementation.", - "IPv4Address1": "10.103.62.178", - "IPv4Address2": "0.0.0.0", - "IPv4Address3": "0.0.0.0", - "IPv6Address1": "::", - "IPv6Address2": "::", - "IPv6Address3": "::", - "Id": "LenovoDNS", - "PreferredAddresstype": "IPv4" - }, - "ret": true - } - }' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils - - -class XCCRedfishUtils(RedfishUtils): - @staticmethod - def _find_empty_virt_media_slot(resources, media_types, - media_match_strict=True): - for uri, data in resources.items(): - # check MediaTypes - if 'MediaTypes' in data and media_types: - if not set(media_types).intersection(set(data['MediaTypes'])): - continue - else: - if media_match_strict: - continue - if 'RDOC' in uri: - continue - # if ejected, 'Inserted' should be False and 'ImageName' cleared - if (not data.get('Inserted', False) and - not data.get('ImageName')): - return uri, data - return None, None - - def virtual_media_eject_one(self, image_url): - # locate and read the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} - virt_media_uri = data["VirtualMedia"]["@odata.id"] - response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: - return response - data = response['data'] - virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) - resources, headers = self._read_virt_media_resources(virt_media_list) - - # find the VirtualMedia resource to eject - uri, data, eject = self._find_virt_media_to_eject(resources, image_url) - if uri and eject: - if ('Actions' not in data or - '#VirtualMedia.EjectMedia' not in data['Actions']): - # try to eject via PATCH if no EjectMedia action found - h = headers[uri] - if 'allow' in h: - methods = [m.strip() for m in h.get('allow').split(',')] - if 'PATCH' not in methods: - # if Allow header present and PATCH missing, 
return error - return {'ret': False, - 'msg': "%s action not found and PATCH not allowed" - % '#VirtualMedia.EjectMedia'} - return self.virtual_media_eject_via_patch(uri) - else: - # POST to the EjectMedia Action - action = data['Actions']['#VirtualMedia.EjectMedia'] - if 'target' not in action: - return {'ret': False, - 'msg': "target URI property missing from Action " - "#VirtualMedia.EjectMedia"} - action_uri = action['target'] - # empty payload for Eject action - payload = {} - # POST to action - response = self.post_request(self.root_uri + action_uri, - payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia ejected"} - elif uri and not eject: - # already ejected: return success but changed=False - return {'ret': True, 'changed': False, - 'msg': "VirtualMedia image '%s' already ejected" % - image_url} - else: - # return failure (no resources matching image_url found) - return {'ret': False, 'changed': False, - 'msg': "No VirtualMedia resource found with image '%s' " - "inserted" % image_url} - - def virtual_media_eject(self, options): - if options: - image_url = options.get('image_url') - if image_url: # eject specified one media - return self.virtual_media_eject_one(image_url) - - # eject all inserted media when no image_url specified - # read all the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} - virt_media_uri = data["VirtualMedia"]["@odata.id"] - response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: - return response - data = response['data'] - virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) - resources, headers = self._read_virt_media_resources(virt_media_list) - - # eject all inserted media one by one - ejected_media_list = [] - for uri, data in resources.items(): - if data.get('Image') and data.get('Inserted', True): - returndict = self.virtual_media_eject_one(data.get('Image')) - if not returndict['ret']: - return returndict - ejected_media_list.append(data.get('Image')) - - if len(ejected_media_list) == 0: - # no media inserted: return success but changed=False - return {'ret': True, 'changed': False, - 'msg': "No VirtualMedia image inserted"} - else: - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia %s ejected" % str(ejected_media_list)} - - def raw_get_resource(self, resource_uri): - if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - data = response['data'] - return {'ret': True, 'data': data} - - def raw_get_collection_resource(self, resource_uri): - if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - if 'Members' not in response['data']: - return {'ret': False, 'msg': "Specified resource_uri doesn't have Members property"} - member_list = [i['@odata.id'] for i in response['data'].get('Members', [])] - - # get member resource one by one - data_list = [] - for member_uri in member_list: - uri = self.root_uri + member_uri - response = self.get_request(uri) - if response['ret'] is False: - return response - data = 
response['data'] - data_list.append(data) - - return {'ret': True, 'data_list': data_list} - - def raw_patch_resource(self, resource_uri, request_body): - if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} - if request_body is None: - return {'ret': False, 'msg': "request_body is missing"} - # check whether resource_uri existing or not - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - original_etag = response['data']['@odata.etag'] - - # check validity of keys in request_body - data = response['data'] - for key in request_body.keys(): - if key not in data: - return {'ret': False, 'msg': "Key %s not found. Supported key list: %s" % (key, str(data.keys()))} - - # perform patch - response = self.patch_request(self.root_uri + resource_uri, request_body) - if response['ret'] is False: - return response - - # check whether changed or not - current_etag = '' - if 'data' in response and '@odata.etag' in response['data']: - current_etag = response['data']['@odata.etag'] - if current_etag != original_etag: - return {'ret': True, 'changed': True} - else: - return {'ret': True, 'changed': False} - - def raw_post_resource(self, resource_uri, request_body): - if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} - if '/Actions/' not in resource_uri: - return {'ret': False, 'msg': "Bad uri %s. Keyword /Actions/ should be included in uri" % resource_uri} - if request_body is None: - return {'ret': False, 'msg': "request_body is missing"} - # get action base uri data for further checking - action_base_uri = resource_uri.split('/Actions/')[0] - response = self.get_request(self.root_uri + action_base_uri) - if response['ret'] is False: - return response - if 'Actions' not in response['data']: - return {'ret': False, 'msg': "Actions property not found in %s" % action_base_uri} - - # check resouce_uri with target uri found in action base uri data - action_found = False - action_info_uri = None - action_target_uri_list = [] - for key in response['data']['Actions'].keys(): - if action_found: - break - if not key.startswith('#'): - continue - if 'target' in response['data']['Actions'][key]: - if resource_uri == response['data']['Actions'][key]['target']: - action_found = True - if '@Redfish.ActionInfo' in response['data']['Actions'][key]: - action_info_uri = response['data']['Actions'][key]['@Redfish.ActionInfo'] - else: - action_target_uri_list.append(response['data']['Actions'][key]['target']) - if not action_found and 'Oem' in response['data']['Actions']: - for key in response['data']['Actions']['Oem'].keys(): - if action_found: - break - if not key.startswith('#'): - continue - if 'target' in response['data']['Actions']['Oem'][key]: - if resource_uri == response['data']['Actions']['Oem'][key]['target']: - action_found = True - if '@Redfish.ActionInfo' in response['data']['Actions']['Oem'][key]: - action_info_uri = response['data']['Actions']['Oem'][key]['@Redfish.ActionInfo'] - else: - action_target_uri_list.append(response['data']['Actions']['Oem'][key]['target']) - - if not action_found: - return {'ret': False, - 'msg': 'Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. 
Supported uri: %s' - % (str(action_target_uri_list))} - - # check request_body with parameter name defined by @Redfish.ActionInfo - if action_info_uri is not None: - response = self.get_request(self.root_uri + action_info_uri) - if response['ret'] is False: - return response - for key in request_body.keys(): - key_found = False - for para in response['data']['Parameters']: - if key == para['Name']: - key_found = True - break - if not key_found: - return {'ret': False, - 'msg': 'Invalid property %s found in request_body. Please refer to @Redfish.ActionInfo Parameters: %s' - % (key, str(response['data']['Parameters']))} - - # perform post - response = self.post_request(self.root_uri + resource_uri, request_body) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True} - - -# More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Manager": ["VirtualMediaInsert", - "VirtualMediaEject"], - "Raw": ["GetResource", - "GetCollectionResource", - "PatchResource", - "PostResource"] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict(), - virtual_media=dict( - type='dict', - options=dict( - media_types=dict(type='list', elements='str', default=[]), - image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), - username=dict(), - password=dict(no_log=True), - transfer_protocol_type=dict(), - transfer_method=dict(), - ) - ), - resource_uri=dict(), - request_body=dict( - type='dict', - ), - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # VirtualMedia options - virtual_media = module.params['virtual_media'] - - # resource_uri - resource_uri = module.params['resource_uri'] - - # request_body - request_body = module.params['request_body'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = XCCRedfishUtils(creds, root_uri, timeout, module, resource_id=resource_id, data_modification=True) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - if category == "Manager": - # execute only if we find a Manager service resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == 'VirtualMediaInsert': - result = rf_utils.virtual_media_insert(virtual_media) - elif command == 'VirtualMediaEject': - result = rf_utils.virtual_media_eject(virtual_media) - elif category == "Raw": - for command in command_list: - if command == 'GetResource': - result = rf_utils.raw_get_resource(resource_uri) - elif command == 'GetCollectionResource': - result = rf_utils.raw_get_collection_resource(resource_uri) - elif command == 'PatchResource': - result = rf_utils.raw_patch_resource(resource_uri, request_body) - elif command == 'PostResource': - result = rf_utils.raw_post_resource(resource_uri, request_body) - - # Return data back or fail with proper message - if result['ret'] is True: - if command == 'GetResource' or command == 'GetCollectionResource': - module.exit_json(redfish_facts=result) - else: - changed = result.get('changed', True) - msg = result.get('msg', 'Action was successful') - module.exit_json(changed=changed, msg=msg) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py b/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py deleted file mode 100644 index b3bb6c2a..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) -# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -author: - - Naval Patel (@navalkp) - - Prashant Bhosale (@prabhosa) -module: lxca_cmms -short_description: Custom module for lxca cmms inventory utility -description: - - This module returns/displays a inventory details of cmms - -options: - uuid: - description: - uuid of device, this is string with length greater than 16. - type: str - - command_options: - description: - options to filter nodes information - default: cmms - choices: - - cmms - - cmms_by_uuid - - cmms_by_chassis_uuid - type: str - - chassis: - description: - uuid of chassis, this is string with length greater than 16. 
- type: str - -extends_documentation_fragment: -- community.general.lxca_common - -''' - -EXAMPLES = ''' -# get all cmms info -- name: Get nodes data from LXCA - community.general.lxca_cmms: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - -# get specific cmms info by uuid -- name: Get nodes data from LXCA - community.general.lxca_cmms: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - uuid: "3C737AA5E31640CE949B10C129A8B01F" - command_options: cmms_by_uuid - -# get specific cmms info by chassis uuid -- name: Get nodes data from LXCA - community.general.lxca_cmms: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - chassis: "3C737AA5E31640CE949B10C129A8B01F" - command_options: cmms_by_chassis_uuid - -''' - -RETURN = r''' -result: - description: cmms detail from lxca - returned: success - type: dict - sample: - cmmList: - - machineType: '' - model: '' - type: 'CMM' - uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' - # bunch of properties - - machineType: '' - model: '' - type: 'CMM' - uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' - # bunch of properties - # Multiple cmms details -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object -try: - from pylxca import cmms -except ImportError: - pass - - -UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.' -CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.' -SUCCESS_MSG = "Success %s result" - - -def _cmms(module, lxca_con): - return cmms(lxca_con) - - -def _cmms_by_uuid(module, lxca_con): - if not module.params['uuid']: - module.fail_json(msg=UUID_REQUIRED) - return cmms(lxca_con, module.params['uuid']) - - -def _cmms_by_chassis_uuid(module, lxca_con): - if not module.params['chassis']: - module.fail_json(msg=CHASSIS_UUID_REQUIRED) - return cmms(lxca_con, chassis=module.params['chassis']) - - -def setup_module_object(): - """ - this function merge argument spec and create ansible module object - :return: - """ - args_spec = dict(LXCA_COMMON_ARGS) - args_spec.update(INPUT_ARG_SPEC) - module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) - - return module - - -FUNC_DICT = { - 'cmms': _cmms, - 'cmms_by_uuid': _cmms_by_uuid, - 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid, -} - - -INPUT_ARG_SPEC = dict( - command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', - 'cmms_by_chassis_uuid']), - uuid=dict(default=None), - chassis=dict(default=None) -) - - -def execute_module(module): - """ - This function invoke commands - :param module: Ansible module object - """ - try: - with connection_object(module) as lxca_con: - result = FUNC_DICT[module.params['command_options']](module, lxca_con) - module.exit_json(changed=False, - msg=SUCCESS_MSG % module.params['command_options'], - result=result) - except Exception as exception: - error_msg = '; '.join((e) for e in exception.args) - module.fail_json(msg=error_msg, exception=traceback.format_exc()) - - -def main(): - module = setup_module_object() - has_pylxca(module) - execute_module(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py b/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py deleted file mode 100644 index 
62b8e334..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) -# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -author: - - Naval Patel (@navalkp) - - Prashant Bhosale (@prabhosa) -module: lxca_nodes -short_description: Custom module for lxca nodes inventory utility -description: - - This module returns/displays a inventory details of nodes - -options: - uuid: - description: - uuid of device, this is string with length greater than 16. - type: str - - command_options: - description: - options to filter nodes information - default: nodes - choices: - - nodes - - nodes_by_uuid - - nodes_by_chassis_uuid - - nodes_status_managed - - nodes_status_unmanaged - type: str - - chassis: - description: - uuid of chassis, this is string with length greater than 16. - type: str - -extends_documentation_fragment: -- community.general.lxca_common - -''' - -EXAMPLES = ''' -# get all nodes info -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - command_options: nodes - -# get specific nodes info by uuid -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - uuid: "3C737AA5E31640CE949B10C129A8B01F" - command_options: nodes_by_uuid - -# get specific nodes info by chassis uuid -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - chassis: "3C737AA5E31640CE949B10C129A8B01F" - command_options: nodes_by_chassis_uuid - -# get managed nodes -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - command_options: nodes_status_managed - -# get unmanaged nodes -- name: Get nodes data from LXCA - community.general.lxca_nodes: - login_user: USERID - login_password: Password - auth_url: "https://10.243.15.168" - command_options: nodes_status_unmanaged - -''' - -RETURN = r''' -result: - description: nodes detail from lxca - returned: always - type: dict - sample: - nodeList: - - machineType: '6241' - model: 'AC1' - type: 'Rack-TowerServer' - uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' - # bunch of properties - - machineType: '8871' - model: 'AC1' - type: 'Rack-TowerServer' - uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' - # bunch of properties - # Multiple nodes details -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object -try: - from pylxca import nodes -except ImportError: - pass - - -UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.' -CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.' 
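# Like lxca_cmms above, this module maps each command_options value to a
# small helper via FUNC_DICT (defined below); execute_module() looks up the
# helper and calls it inside a pylxca connection_object() context, so any
# connection or command failure surfaces as a single fail_json with the
# formatted exception and traceback.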
-SUCCESS_MSG = "Success %s result" - - -def _nodes(module, lxca_con): - return nodes(lxca_con) - - -def _nodes_by_uuid(module, lxca_con): - if not module.params['uuid']: - module.fail_json(msg=UUID_REQUIRED) - return nodes(lxca_con, module.params['uuid']) - - -def _nodes_by_chassis_uuid(module, lxca_con): - if not module.params['chassis']: - module.fail_json(msg=CHASSIS_UUID_REQUIRED) - return nodes(lxca_con, chassis=module.params['chassis']) - - -def _nodes_status_managed(module, lxca_con): - return nodes(lxca_con, status='managed') - - -def _nodes_status_unmanaged(module, lxca_con): - return nodes(lxca_con, status='unmanaged') - - -def setup_module_object(): - """ - this function merge argument spec and create ansible module object - :return: - """ - args_spec = dict(LXCA_COMMON_ARGS) - args_spec.update(INPUT_ARG_SPEC) - module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) - - return module - - -FUNC_DICT = { - 'nodes': _nodes, - 'nodes_by_uuid': _nodes_by_uuid, - 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid, - 'nodes_status_managed': _nodes_status_managed, - 'nodes_status_unmanaged': _nodes_status_unmanaged, -} - - -INPUT_ARG_SPEC = dict( - command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid', - 'nodes_by_chassis_uuid', - 'nodes_status_managed', - 'nodes_status_unmanaged']), - uuid=dict(default=None), chassis=dict(default=None) -) - - -def execute_module(module): - """ - This function invoke commands - :param module: Ansible module object - """ - try: - with connection_object(module) as lxca_con: - result = FUNC_DICT[module.params['command_options']](module, lxca_con) - module.exit_json(changed=False, - msg=SUCCESS_MSG % module.params['command_options'], - result=result) - except Exception as exception: - error_msg = '; '.join(exception.args) - module.fail_json(msg=error_msg, exception=traceback.format_exc()) - - -def main(): - module = setup_module_object() - has_pylxca(module) - execute_module(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py deleted file mode 100644 index 5e02154e..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: idrac_redfish_command -short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. 
- - For use with Dell iDRAC operations that require Redfish OEM extensions -options: - category: - required: true - description: - - Category to execute on OOB controller - type: str - command: - required: true - description: - - List of commands to execute on OOB controller - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller - type: str - username: - description: - - User for authentication with OOB controller - type: str - password: - description: - - Password for authentication with OOB controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify - type: str - version_added: '0.2.0' - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Create BIOS configuration job (schedule BIOS setting update) - community.general.idrac_redfish_command: - category: Systems - command: CreateBiosConfigJob - resource_id: System.Embedded.1 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -import re -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -class IdracRedfishUtils(RedfishUtils): - - def create_bios_config_job(self): - result = {} - key = "Bios" - jobs = "Jobs" - - # Search for 'key' entry and extract URI from it - response = self.get_request(self.root_uri + self.systems_uris[0]) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - bios_uri = data[key]["@odata.id"] - - # Extract proper URI - response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][ - "@odata.id"] - - payload = {"TargetSettingsURI": set_bios_attr_uri} - response = self.post_request( - self.root_uri + self.manager_uri + "/" + jobs, payload) - if response['ret'] is False: - return response - - response_output = response['resp'].__dict__ - job_id = response_output["headers"]["Location"] - job_id = re.search("JID_.+", job_id).group() - # Currently not passing job_id back to user but patch is coming - return {'ret': True, 'msg': "Config job %s created" % job_id} - - -CATEGORY_COMMANDS_ALL = { - "Systems": ["CreateBiosConfigJob"], - "Accounts": [], - "Manager": [] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict() - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = 
module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - - if category == "Systems": - # execute only if we find a System resource - result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "CreateBiosConfigJob": - # execute only if we find a Managers resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - result = rf_utils.create_bios_config_job() - - # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - module.exit_json(changed=True, msg='Action was successful') - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py deleted file mode 100644 index adea4b11..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ /dev/null @@ -1,331 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: idrac_redfish_config -short_description: Manages servers through iDRAC using Dell Redfish APIs -description: - - For use with Dell iDRAC operations that require Redfish OEM extensions - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - set or update a configuration attribute. 
-options: - category: - required: true - type: str - description: - - Category to execute on iDRAC - command: - required: true - description: - - List of commands to execute on iDRAC - - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and - I(SetSystemAttributes) are mutually exclusive commands when C(category) - is I(Manager) - type: list - elements: str - baseuri: - required: true - description: - - Base URI of iDRAC - type: str - username: - description: - - User for authentication with iDRAC - type: str - password: - description: - - Password for authentication with iDRAC - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - manager_attributes: - required: false - description: - - dictionary of iDRAC attribute name and value pairs to update - default: {} - type: 'dict' - version_added: '0.2.0' - timeout: - description: - - Timeout in seconds for URL requests to iDRAC controller - default: 10 - type: int - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify - type: str - version_added: '0.2.0' - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Enable NTP and set NTP server and Time zone attributes in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - NTPConfigGroup.1.NTPEnable: "Enabled" - NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" - Time.1.Timezone: "{{ timezone }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" - - - name: Enable Syslog and set Syslog servers in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SysLog.1.SysLogEnable: "Enabled" - SysLog.1.Server1: "{{ syslog_server1 }}" - SysLog.1.Server2: "{{ syslog_server2 }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" - - - name: Configure SNMP community string, port, protocol and trap format - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SNMP.1.AgentEnable: "Enabled" - SNMP.1.AgentCommunity: "public_community_string" - SNMP.1.TrapFormat: "SNMPv1" - SNMP.1.SNMPProtocol: "All" - SNMP.1.DiscoveryPort: 161 - SNMP.1.AlertPort: 162 - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" - - - name: Enable CSIOR - community.general.idrac_redfish_config: - category: Manager - command: SetLifecycleControllerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" - - - name: Set Power Supply Redundancy Policy to A/B Grid Redundant - community.general.idrac_redfish_config: - category: Manager - command: SetSystemAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.validation import ( - check_mutually_exclusive, - 
check_required_arguments -) -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -class IdracRedfishUtils(RedfishUtils): - - def set_manager_attributes(self, command): - - result = {} - required_arg_spec = {'manager_attributes': {'required': True}} - - try: - check_required_arguments(required_arg_spec, self.module.params) - - except TypeError as e: - msg = to_native(e) - self.module.fail_json(msg=msg) - - key = "Attributes" - command_manager_attributes_uri_map = { - "SetManagerAttributes": self.manager_uri, - "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1", - "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1" - } - manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri) - - attributes = self.module.params['manager_attributes'] - - attrs_to_patch = {} - attrs_skipped = {} - attrs_bad = {} # Store attrs which were not found in the system - - # Search for key entry and extract URI from it - response = self.get_request(self.root_uri + manager_uri + "/" + key) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - - if key not in data: - return {'ret': False, - 'msg': "%s: Key %s not found" % (command, key), - 'warning': ""} - - for attr_name, attr_value in attributes.items(): - # Check if attribute exists - if attr_name not in data[u'Attributes']: - # Skip and proceed to next attribute if this isn't valid - attrs_bad.update({attr_name: attr_value}) - continue - - # Find out if value is already set to what we want. If yes, exclude - # those attributes - if data[u'Attributes'][attr_name] == attr_value: - attrs_skipped.update({attr_name: attr_value}) - else: - attrs_to_patch.update({attr_name: attr_value}) - - warning = "" - if attrs_bad: - warning = "Incorrect attributes %s" % (attrs_bad) - - if not attrs_to_patch: - return {'ret': True, 'changed': False, - 'msg': "No changes made. 
Manager attributes already set.", - 'warning': warning} - - payload = {"Attributes": attrs_to_patch} - response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload) - if response['ret'] is False: - return response - - return {'ret': True, 'changed': True, - 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch), - 'warning': warning} - - -CATEGORY_COMMANDS_ALL = { - "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes", - "SetSystemAttributes"] -} - - -# list of mutually exclusive commands for a category -CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { - "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", - "SetSystemAttributes"]] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - manager_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - resource_id=dict() - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # check for mutually exclusive commands - try: - # check_mutually_exclusive accepts a single list or list of lists that - # are groups of terms that should be mutually exclusive with one another - # and checks that against a dictionary - check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category], - dict.fromkeys(command_list, True)) - - except TypeError as e: - module.fail_json(msg=to_native(e)) - - # Organize by Categories / Commands - - if category == "Manager": - # execute only if we find a Manager resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]: - result = rf_utils.set_manager_attributes(command) - - # Return data back or fail with proper message - if result['ret'] is True: - if result.get('warning'): - module.warn(to_native(result['warning'])) - - module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py deleted file mode 100644 index fb137acc..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ /dev/null @@ -1,243 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2019 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: idrac_redfish_info -short_description: Gather PowerEdge server information through iDRAC using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - get information back. - - For use with Dell EMC iDRAC operations that require Redfish OEM extensions - - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)! 
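  # Because of that rename, callers must capture the module's output with
  # register (as the first task under EXAMPLES below does) instead of reading
  # ansible_facts.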
-options: - category: - required: true - description: - - Category to execute on iDRAC controller - type: str - command: - required: true - description: - - List of commands to execute on iDRAC controller - - C(GetManagerAttributes) returns the list of dicts containing iDRAC, - LifecycleController and System attributes - type: list - elements: str - baseuri: - required: true - description: - - Base URI of iDRAC controller - type: str - username: - description: - - User for authentication with iDRAC controller - type: str - password: - description: - - Password for authentication with iDRAC controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Get Manager attributes with a default of 20 seconds - community.general.idrac_redfish_info: - category: Manager - command: GetManagerAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result - - # Examples to display the value of all or a single iDRAC attribute - - name: Store iDRAC attributes as a fact variable - ansible.builtin.set_fact: - idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}" - - - name: Display all iDRAC attributes - ansible.builtin.debug: - var: idrac_attributes - - - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute - ansible.builtin.debug: - var: idrac_attributes['Syslog.1.SysLogEnable'] - - # Examples to display the value of all or a single LifecycleController attribute - - name: Store LifecycleController attributes as a fact variable - ansible.builtin.set_fact: - lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}" - - - name: Display LifecycleController attributes - ansible.builtin.debug: - var: lc_attributes - - - name: Display the value of 'CollectSystemInventoryOnRestart' attribute - ansible.builtin.debug: - var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] - - # Examples to display the value of all or a single System attribute - - name: Store System attributes as a fact variable - ansible.builtin.set_fact: - system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}" - - - name: Display System attributes - ansible.builtin.debug: - var: system_attributes - - - name: Display the value of 'PSRedPolicy' - ansible.builtin.debug: - var: system_attributes['ServerPwr.1.PSRedPolicy'] - -''' - -RETURN = ''' -msg: - description: different results depending on task - returned: always - type: dict - sample: List of Manager attributes -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -class IdracRedfishUtils(RedfishUtils): - - def get_manager_attributes(self): - result = {} - manager_attributes = [] - properties = ['Attributes', 'Id'] - - response = self.get_request(self.root_uri + self.manager_uri) - - if response['ret'] is False: - return response - data = response['data'] - - # Manager attributes are 
supported as part of iDRAC OEM extension - # Attributes are supported only on iDRAC9 - try: - for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']: - attributes_uri = members[u'@odata.id'] - - response = self.get_request(self.root_uri + attributes_uri) - if response['ret'] is False: - return response - data = response['data'] - - attributes = {} - for prop in properties: - if prop in data: - attributes[prop] = data.get(prop) - - if attributes: - manager_attributes.append(attributes) - - result['ret'] = True - - except (AttributeError, KeyError) as e: - result['ret'] = False - result['msg'] = "Failed to find attribute/key: " + str(e) - - result["entries"] = manager_attributes - return result - - -CATEGORY_COMMANDS_ALL = { - "Manager": ["GetManagerAttributes"] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=True, - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - - if category == "Manager": - # execute only if we find a Manager resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "GetManagerAttributes": - result = rf_utils.get_manager_attributes() - - # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - module.exit_json(redfish_facts=result) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_config.py b/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_config.py deleted file mode 100644 index 46406884..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_config.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. 
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ilo_redfish_config -short_description: Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions -version_added: 4.2.0 -description: - - Builds Redfish URIs locally and sends them to iLO to - set or update a configuration attribute. - - For use with HPE iLO operations that require Redfish OEM extensions. -options: - category: - required: true - type: str - description: - - Command category to execute on iLO. - choices: ['Manager'] - command: - required: true - description: - - List of commands to execute on iLO. - type: list - elements: str - baseuri: - required: true - description: - - Base URI of iLO. - type: str - username: - description: - - User for authentication with iLO. - type: str - password: - description: - - Password for authentication with iLO. - type: str - auth_token: - description: - - Security token for authentication with OOB controller. - type: str - timeout: - description: - - Timeout in seconds for URL requests to iLO controller. - default: 10 - type: int - attribute_name: - required: true - description: - - Name of the attribute to be configured. - type: str - attribute_value: - required: false - description: - - Value of the attribute to be configured. - type: str -author: - - "Bhavya B (@bhavya06)" -''' - -EXAMPLES = ''' - - name: Disable WINS Registration - community.general.ilo_redfish_config: - category: Manager - command: SetWINSReg - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: WINSRegistration - - - name: Set Time Zone - community.general.ilo_redfish_config: - category: Manager - command: SetTimeZone - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: TimeZone - attribute_value: Chennai -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -CATEGORY_COMMANDS_ALL = { - "Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"] -} - -from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, choices=list( - CATEGORY_COMMANDS_ALL.keys())), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - attribute_name=dict(required=True), - attribute_value=dict(), - timeout=dict(type='int', default=10) - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - creds = {"user": module.params['username'], - "pswd": module.params['password'], - "token": module.params['auth_token']} - - timeout = module.params['timeout'] - - root_uri = "https://" + module.params['baseuri'] - rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) - mgr_attributes = {'mgr_attr_name': module.params['attribute_name'], - 'mgr_attr_value': 
module.params['attribute_value']} - changed = False - - offending = [ - cmd for cmd in command_list if cmd not in CATEGORY_COMMANDS_ALL[category]] - - if offending: - module.fail_json(msg=to_native("Invalid Command(s): '%s'. Allowed Commands = %s" % ( - offending, CATEGORY_COMMANDS_ALL[category]))) - - if category == "Manager": - resource = rf_utils._find_managers_resource() - if not resource['ret']: - module.fail_json(msg=to_native(resource['msg'])) - - dispatch = dict( - SetTimeZone=rf_utils.set_time_zone, - SetDNSserver=rf_utils.set_dns_server, - SetDomainName=rf_utils.set_domain_name, - SetNTPServers=rf_utils.set_ntp_server, - SetWINSReg=rf_utils.set_wins_registration - ) - - for command in command_list: - result[command] = dispatch[command](mgr_attributes) - if 'changed' in result[command]: - changed |= result[command]['changed'] - - module.exit_json(ilo_redfish_config=result, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_info.py b/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_info.py deleted file mode 100644 index 6773c4ae..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_info.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ilo_redfish_info -short_description: Gathers server information through iLO using Redfish APIs -version_added: 4.2.0 -description: - - Builds Redfish URIs locally and sends them to iLO to - get information back. - - For use with HPE iLO operations that require Redfish OEM extensions. -options: - category: - required: true - description: - - List of categories to execute on iLO. - type: list - elements: str - command: - required: true - description: - - List of commands to execute on iLO. - type: list - elements: str - baseuri: - required: true - description: - - Base URI of iLO. - type: str - username: - description: - - User for authentication with iLO. - type: str - password: - description: - - Password for authentication with iLO. - type: str - auth_token: - description: - - Security token for authentication with iLO. - type: str - timeout: - description: - - Timeout in seconds for URL requests to iLO. - default: 10 - type: int -author: - - "Bhavya B (@bhavya06)" -''' - -EXAMPLES = ''' - - name: Get iLO Sessions - community.general.ilo_redfish_info: - category: Sessions - command: GetiLOSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result_sessions -''' - -RETURN = ''' -ilo_redfish_info: - description: Returns iLO sessions. - type: dict - contains: - GetiLOSessions: - description: Returns the iLO session msg and whether the function executed successfully. - type: dict - contains: - ret: - description: Check variable to see if the information was successfully retrieved. - type: bool - msg: - description: Information of all active iLO sessions. - type: list - elements: dict - contains: - Description: - description: Provides a description of the resource. - type: str - Id: - description: The sessionId. - type: str - Name: - description: The name of the resource. 
- type: str - UserName: - description: Name to use to log in to the management processor. - type: str - returned: always -''' - -CATEGORY_COMMANDS_ALL = { - "Sessions": ["GetiLOSessions"] -} - -CATEGORY_COMMANDS_DEFAULT = { - "Sessions": "GetiLOSessions" -} - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils - - -def main(): - result = {} - category_list = [] - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, type='list', elements='str'), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=True - ) - - creds = {"user": module.params['username'], - "pswd": module.params['password'], - "token": module.params['auth_token']} - - timeout = module.params['timeout'] - - root_uri = "https://" + module.params['baseuri'] - rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) - - # Build Category list - if "all" in module.params['category']: - for entry in CATEGORY_COMMANDS_ALL: - category_list.append(entry) - else: - # one or more categories specified - category_list = module.params['category'] - - for category in category_list: - command_list = [] - # Build Command list for each Category - if category in CATEGORY_COMMANDS_ALL: - if not module.params['command']: - # True if we don't specify a command --> use default - command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) - elif "all" in module.params['command']: - for entry in CATEGORY_COMMANDS_ALL[category]: - command_list.append(entry) - # one or more commands - else: - command_list = module.params['command'] - # Verify that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg="Invalid Command: %s" % cmd) - else: - # Fail if even one category given is invalid - module.fail_json(msg="Invalid Category: %s" % category) - - # Organize by Categories / Commands - if category == "Sessions": - for command in command_list: - if command == "GetiLOSessions": - result[command] = rf_utils.get_ilo_sessions() - - module.exit_json(ilo_redfish_info=result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py b/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py deleted file mode 100644 index 5437a798..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py +++ /dev/null @@ -1,831 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_command -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. 
- - Manages OOB controller ex. reboot, log management. - - Manages OOB controller users ex. add, remove, update. - - Manages system power ex. on, off, graceful and forced reboot. -options: - category: - required: true - description: - - Category to execute on OOB controller - type: str - command: - required: true - description: - - List of commands to execute on OOB controller - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller - type: str - username: - description: - - Username for authentication with OOB controller - type: str - password: - description: - - Password for authentication with OOB controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - session_uri: - description: - - URI of the session resource - type: str - version_added: 2.3.0 - id: - required: false - aliases: [ account_id ] - description: - - ID of account to delete/modify. - - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request. - type: str - new_username: - required: false - aliases: [ account_username ] - description: - - Username of account to add/delete/modify - type: str - new_password: - required: false - aliases: [ account_password ] - description: - - New password of account to add/modify - type: str - roleid: - required: false - aliases: [ account_roleid ] - description: - - Role of account to add/modify - type: str - bootdevice: - required: false - description: - - bootdevice when setting boot configuration - type: str - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - boot_override_mode: - description: - - Boot mode when using an override. 
- type: str - choices: [ Legacy, UEFI ] - version_added: 3.5.0 - uefi_target: - required: false - description: - - UEFI target when bootdevice is "UefiTarget" - type: str - boot_next: - required: false - description: - - BootNext target when bootdevice is "UefiBootNext" - type: str - update_username: - required: false - aliases: [ account_updatename ] - description: - - new update user name for account_username - type: str - version_added: '0.2.0' - account_properties: - required: false - description: - - properties of account service to update - type: dict - version_added: '0.2.0' - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify - type: str - version_added: '0.2.0' - update_image_uri: - required: false - description: - - The URI of the image for the update - type: str - version_added: '0.2.0' - update_protocol: - required: false - description: - - The protocol for the update - type: str - version_added: '0.2.0' - update_targets: - required: false - description: - - The list of target resource URIs to apply the update to - type: list - elements: str - version_added: '0.2.0' - update_creds: - required: false - description: - - The credentials for retrieving the update image - type: dict - version_added: '0.2.0' - suboptions: - username: - required: false - description: - - The username for retrieving the update image - type: str - password: - required: false - description: - - The password for retrieving the update image - type: str - virtual_media: - required: false - description: - - The options for VirtualMedia commands - type: dict - version_added: '0.2.0' - suboptions: - media_types: - required: false - description: - - The list of media types appropriate for the image - type: list - elements: str - image_url: - required: false - description: - - The URL of the image to insert or eject - type: str - inserted: - required: false - description: - - Indicates if the image is treated as inserted on command completion - type: bool - default: True - write_protected: - required: false - description: - - Indicates if the media is treated as write-protected - type: bool - default: True - username: - required: false - description: - - The username for accessing the image URL - type: str - password: - required: false - description: - - The password for accessing the image URL - type: str - transfer_protocol_type: - required: false - description: - - The network protocol to use with the image - type: str - transfer_method: - required: false - description: - - The transfer method to use with the image - type: str - strip_etag_quotes: - description: - - Removes surrounding quotes of etag used in C(If-Match) header - of C(PATCH) requests. - - Only use this option to resolve bad vendor implementation where - C(If-Match) only matches the unquoted etag string. 
- type: bool - default: false - version_added: 3.7.0 - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Restart system power gracefully - community.general.redfish_command: - category: Systems - command: PowerGracefulRestart - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Turn system power off - community.general.redfish_command: - category: Systems - command: PowerForceOff - resource_id: 437XR1138R2 - - - name: Restart system power forcefully - community.general.redfish_command: - category: Systems - command: PowerForceRestart - resource_id: 437XR1138R2 - - - name: Shutdown system power gracefully - community.general.redfish_command: - category: Systems - command: PowerGracefulShutdown - resource_id: 437XR1138R2 - - - name: Turn system power on - community.general.redfish_command: - category: Systems - command: PowerOn - resource_id: 437XR1138R2 - - - name: Reboot system power - community.general.redfish_command: - category: Systems - command: PowerReboot - resource_id: 437XR1138R2 - - - name: Set one-time boot device to {{ bootdevice }} - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "{{ bootdevice }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01" - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "UefiTarget" - uefi_target: "/0x31/0x33/0x01/0x01" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot device to BootNext target of "Boot0001" - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "UefiBootNext" - boot_next: "Boot0001" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set persistent boot device override - community.general.redfish_command: - category: Systems - command: EnableContinuousBootOverride - resource_id: 437XR1138R2 - bootdevice: "{{ bootdevice }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot to BiosSetup - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - boot_next: BiosSetup - boot_override_mode: Legacy - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Disable persistent boot device override - community.general.redfish_command: - category: Systems - command: DisableBootOverride - - - name: Add user - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ new_password }}" - roleid: "{{ roleid }}" - - - name: Add user using new option aliases - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_password: "{{ account_password }}" - account_roleid: "{{ account_roleid }}" - - - name: Delete user - community.general.redfish_command: - category: Accounts - command: DeleteUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - 
account_username: "{{ account_username }}" - - - name: Disable user - community.general.redfish_command: - category: Accounts - command: DisableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - - - name: Enable user - community.general.redfish_command: - category: Accounts - command: EnableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - - - name: Add and enable user - community.general.redfish_command: - category: Accounts - command: AddUser,EnableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ new_password }}" - roleid: "{{ roleid }}" - - - name: Update user password - community.general.redfish_command: - category: Accounts - command: UpdateUserPassword - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_password: "{{ account_password }}" - - - name: Update user role - community.general.redfish_command: - category: Accounts - command: UpdateUserRole - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - roleid: "{{ roleid }}" - - - name: Update user name - community.general.redfish_command: - category: Accounts - command: UpdateUserName - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_updatename: "{{ account_updatename }}" - - - name: Update user name - community.general.redfish_command: - category: Accounts - command: UpdateUserName - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - update_username: "{{ update_username }}" - - - name: Update AccountService properties - community.general.redfish_command: - category: Accounts - command: UpdateAccountServiceProperties - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_properties: - AccountLockoutThreshold: 5 - AccountLockoutDuration: 600 - - - name: Clear Manager Logs with a timeout of 20 seconds - community.general.redfish_command: - category: Manager - command: ClearLogs - resource_id: BMC - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - - - name: Create session - community.general.redfish_command: - category: Sessions - command: CreateSession - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Set chassis indicator LED to blink using security token for auth - community.general.redfish_command: - category: Chassis - command: IndicatorLedBlink - resource_id: 1U - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - - - name: Delete session using security token created by CreateSesssion above - community.general.redfish_command: - category: Sessions - command: DeleteSession - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - session_uri: "{{ result.session.uri }}" - - - name: Clear Sessions - community.general.redfish_command: - category: Sessions - command: ClearSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Simple update - community.general.redfish_command: - category: Update - command: 
SimpleUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_uri: https://example.com/myupdate.img - - - name: Simple update with additional options - community.general.redfish_command: - category: Update - command: SimpleUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_uri: //example.com/myupdate.img - update_protocol: FTP - update_targets: - - /redfish/v1/UpdateService/FirmwareInventory/BMC - update_creds: - username: operator - password: supersecretpwd - - - name: Insert Virtual Media - community.general.redfish_command: - category: Manager - command: VirtualMediaInsert - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - media_types: - - CD - - DVD - resource_id: BMC - - - name: Eject Virtual Media - community.general.redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - resource_id: BMC - - - name: Restart manager power gracefully - community.general.redfish_command: - category: Manager - command: GracefulRestart - resource_id: BMC - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Restart manager power gracefully - community.general.redfish_command: - category: Manager - command: PowerGracefulRestart - resource_id: BMC - - - name: Turn manager power off - community.general.redfish_command: - category: Manager - command: PowerForceOff - resource_id: BMC - - - name: Restart manager power forcefully - community.general.redfish_command: - category: Manager - command: PowerForceRestart - resource_id: BMC - - - name: Shutdown manager power gracefully - community.general.redfish_command: - category: Manager - command: PowerGracefulShutdown - resource_id: BMC - - - name: Turn manager power on - community.general.redfish_command: - category: Manager - command: PowerOn - resource_id: BMC - - - name: Reboot manager power - community.general.redfish_command: - category: Manager - command: PowerReboot - resource_id: BMC -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -# More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", - "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride"], - "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], - "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", - "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", - "UpdateAccountServiceProperties"], - "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], - "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", - "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", - "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], - "Update": ["SimpleUpdate"] -} - - -def main(): - result = {} - module = 
AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - session_uri=dict(), - id=dict(aliases=["account_id"]), - new_username=dict(aliases=["account_username"]), - new_password=dict(aliases=["account_password"], no_log=True), - roleid=dict(aliases=["account_roleid"]), - update_username=dict(type='str', aliases=["account_updatename"]), - account_properties=dict(type='dict', default={}), - bootdevice=dict(), - timeout=dict(type='int', default=10), - uefi_target=dict(), - boot_next=dict(), - boot_override_mode=dict(choices=['Legacy', 'UEFI']), - resource_id=dict(), - update_image_uri=dict(), - update_protocol=dict(), - update_targets=dict(type='list', elements='str', default=[]), - update_creds=dict( - type='dict', - options=dict( - username=dict(), - password=dict(no_log=True) - ) - ), - virtual_media=dict( - type='dict', - options=dict( - media_types=dict(type='list', elements='str', default=[]), - image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), - username=dict(), - password=dict(no_log=True), - transfer_protocol_type=dict(), - transfer_method=dict(), - ) - ), - strip_etag_quotes=dict(type='bool', default=False), - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # user to add/modify/delete - user = {'account_id': module.params['id'], - 'account_username': module.params['new_username'], - 'account_password': module.params['new_password'], - 'account_roleid': module.params['roleid'], - 'account_updatename': module.params['update_username'], - 'account_properties': module.params['account_properties']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # update options - update_opts = { - 'update_image_uri': module.params['update_image_uri'], - 'update_protocol': module.params['update_protocol'], - 'update_targets': module.params['update_targets'], - 'update_creds': module.params['update_creds'] - } - - # Boot override options - boot_opts = { - 'bootdevice': module.params['bootdevice'], - 'uefi_target': module.params['uefi_target'], - 'boot_next': module.params['boot_next'], - 'boot_override_mode': module.params['boot_override_mode'], - } - - # VirtualMedia options - virtual_media = module.params['virtual_media'] - - # Etag options - strip_etag_quotes = module.params['strip_etag_quotes'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. 
Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - if category == "Accounts": - ACCOUNTS_COMMANDS = { - "AddUser": rf_utils.add_user, - "EnableUser": rf_utils.enable_user, - "DeleteUser": rf_utils.delete_user, - "DisableUser": rf_utils.disable_user, - "UpdateUserRole": rf_utils.update_user_role, - "UpdateUserPassword": rf_utils.update_user_password, - "UpdateUserName": rf_utils.update_user_name, - "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties - } - - # execute only if we find an Account service resource - result = rf_utils._find_accountservice_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - result = ACCOUNTS_COMMANDS[command](user) - - elif category == "Systems": - # execute only if we find a System resource - result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command.startswith('Power'): - result = rf_utils.manage_system_power(command) - elif command == "SetOneTimeBoot": - boot_opts['override_enabled'] = 'Once' - result = rf_utils.set_boot_override(boot_opts) - elif command == "EnableContinuousBootOverride": - boot_opts['override_enabled'] = 'Continuous' - result = rf_utils.set_boot_override(boot_opts) - elif command == "DisableBootOverride": - boot_opts['override_enabled'] = 'Disabled' - result = rf_utils.set_boot_override(boot_opts) - - elif category == "Chassis": - result = rf_utils._find_chassis_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"] - - # Check if more than one led_command is present - num_led_commands = sum([command in led_commands for command in command_list]) - if num_led_commands > 1: - result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."} - else: - for command in command_list: - if command in led_commands: - result = rf_utils.manage_indicator_led(command) - - elif category == "Sessions": - # execute only if we find SessionService resources - resource = rf_utils._find_sessionservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "ClearSessions": - result = rf_utils.clear_sessions() - elif command == "CreateSession": - result = rf_utils.create_session() - elif command == "DeleteSession": - result = rf_utils.delete_session(module.params['session_uri']) - - elif category == "Manager": - # execute only if we find a Manager service resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - # standardize on the Power* commands, but allow the the legacy - # GracefulRestart command - if command == 'GracefulRestart': - command = 'PowerGracefulRestart' - - if command.startswith('Power'): - result = rf_utils.manage_manager_power(command) - elif command == 'ClearLogs': - result = rf_utils.clear_logs() - elif command == 'VirtualMediaInsert': - result = 
rf_utils.virtual_media_insert(virtual_media) - elif command == 'VirtualMediaEject': - result = rf_utils.virtual_media_eject(virtual_media) - - elif category == "Update": - # execute only if we find UpdateService resources - resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "SimpleUpdate": - result = rf_utils.simple_update(update_opts) - - # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - changed = result.get('changed', True) - session = result.get('session', dict()) - module.exit_json(changed=changed, session=session, - msg='Action was successful') - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py b/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py deleted file mode 100644 index b903ceed..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py +++ /dev/null @@ -1,389 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_config -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - set or update a configuration attribute. - - Manages BIOS configuration settings. - - Manages OOB controller configuration settings. 
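(Both this module (redfish_config) and redfish_command above expose I(strip_etag_quotes) for BMCs whose C(If-Match) handling only matches the unquoted etag string. A hedged sketch of the underlying read-then-PATCH flow, written against python-requests for brevity; the modules themselves go through ansible's URL helpers, and the URI and credentials below are hypothetical:

    import requests

    def patch_with_etag(base, uri, payload, auth, strip_etag_quotes=False):
        # Fetch the resource first to learn its current ETag.
        resp = requests.get(base + uri, auth=auth, verify=False)
        etag = resp.headers.get("ETag", "")
        # Some BMCs compare If-Match against the bare string ('abc123')
        # instead of the RFC form ('"abc123"'); dropping the quotes is the
        # workaround that strip_etag_quotes enables.
        if strip_etag_quotes and etag.startswith('"') and etag.endswith('"'):
            etag = etag[1:-1]
        return requests.patch(base + uri, json=payload,
                              headers={"If-Match": etag}, auth=auth, verify=False)

    # Hypothetical usage:
    # patch_with_etag("https://203.0.113.10", "/redfish/v1/Systems/1/Bios/Settings",
    #                 {"Attributes": {"BootMode": "Uefi"}}, ("root", "secret"),
    #                 strip_etag_quotes=True)
)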
-options: - category: - required: true - description: - - Category to execute on OOB controller - type: str - command: - required: true - description: - - List of commands to execute on OOB controller - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller - type: str - username: - description: - - User for authentication with OOB controller - type: str - password: - description: - - Password for authentication with OOB controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - bios_attributes: - required: false - description: - - dictionary of BIOS attributes to update - default: {} - type: dict - version_added: '0.2.0' - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - boot_order: - required: false - description: - - list of BootOptionReference strings specifying the BootOrder - default: [] - type: list - elements: str - version_added: '0.2.0' - network_protocols: - required: false - description: - - setting dict of manager services to update - type: dict - version_added: '0.2.0' - resource_id: - required: false - description: - - The ID of the System, Manager or Chassis to modify - type: str - version_added: '0.2.0' - nic_addr: - required: false - description: - - EthernetInterface Address string on OOB controller - default: 'null' - type: str - version_added: '0.2.0' - nic_config: - required: false - description: - - setting dict of EthernetInterface on OOB controller - type: dict - version_added: '0.2.0' - strip_etag_quotes: - description: - - Removes surrounding quotes of etag used in C(If-Match) header - of C(PATCH) requests. - - Only use this option to resolve bad vendor implementation where - C(If-Match) only matches the unquoted etag string. - type: bool - default: false - version_added: 3.7.0 - hostinterface_config: - required: false - description: - - Setting dict of HostInterface on OOB controller. - type: dict - version_added: '4.1.0' - hostinterface_id: - required: false - description: - - Redfish HostInterface instance ID if multiple HostInterfaces are present. 
- type: str - version_added: '4.1.0' - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Set BootMode to UEFI - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - BootMode: "Uefi" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set multiple BootMode attributes - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - BootMode: "Bios" - OneTimeBootMode: "Enabled" - BootSeqRetry: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Enable PXE Boot for NIC1 - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - PxeDev1EnDis: Enabled - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set BIOS default settings with a timeout of 20 seconds - community.general.redfish_config: - category: Systems - command: SetBiosDefaultSettings - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - - - name: Set boot order - community.general.redfish_config: - category: Systems - command: SetBootOrder - boot_order: - - Boot0002 - - Boot0001 - - Boot0000 - - Boot0003 - - Boot0004 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set boot order to the default - community.general.redfish_config: - category: Systems - command: SetDefaultBootOrder - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set Manager Network Protocols - community.general.redfish_config: - category: Manager - command: SetNetworkProtocols - network_protocols: - SNMP: - ProtocolEnabled: True - Port: 161 - HTTP: - ProtocolEnabled: False - Port: 8080 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set Manager NIC - community.general.redfish_config: - category: Manager - command: SetManagerNic - nic_config: - DHCPv4: - DHCPEnabled: False - IPv4StaticAddresses: - Address: 192.168.1.3 - Gateway: 192.168.1.1 - SubnetMask: 255.255.255.0 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Disable Host Interface - community.general.redfish_config: - category: Manager - command: SetHostInterface - hostinterface_config: - InterfaceEnabled: false - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Enable Host Interface for HostInterface resource ID '2' - community.general.redfish_config: - category: Manager - command: SetHostInterface - hostinterface_config: - InterfaceEnabled: true - hostinterface_id: "2" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -# More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", - "SetDefaultBootOrder"], - 
"Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface"] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - bios_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - boot_order=dict(type='list', elements='str', default=[]), - network_protocols=dict( - type='dict', - default={} - ), - resource_id=dict(), - nic_addr=dict(default='null'), - nic_config=dict( - type='dict', - default={} - ), - strip_etag_quotes=dict(type='bool', default=False), - hostinterface_config=dict(type='dict', default={}), - hostinterface_id=dict(), - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # BIOS attributes to update - bios_attributes = module.params['bios_attributes'] - - # boot order - boot_order = module.params['boot_order'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # manager nic - nic_addr = module.params['nic_addr'] - nic_config = module.params['nic_config'] - - # Etag options - strip_etag_quotes = module.params['strip_etag_quotes'] - - # HostInterface config options - hostinterface_config = module.params['hostinterface_config'] - - # HostInterface instance ID - hostinterface_id = module.params['hostinterface_id'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - if category == "Systems": - # execute only if we find a System resource - result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "SetBiosDefaultSettings": - result = rf_utils.set_bios_default_settings() - elif command == "SetBiosAttributes": - result = rf_utils.set_bios_attributes(bios_attributes) - elif command == "SetBootOrder": - result = rf_utils.set_boot_order(boot_order) - elif command == "SetDefaultBootOrder": - result = rf_utils.set_default_boot_order() - - elif category == "Manager": - # execute only if we find a Manager service resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "SetNetworkProtocols": - result = rf_utils.set_network_protocols(module.params['network_protocols']) - elif command == "SetManagerNic": - result = rf_utils.set_manager_nic(nic_addr, nic_config) - elif command == "SetHostInterface": - result = rf_utils.set_hostinterface_attributes(hostinterface_config, hostinterface_id) - - # Return data back or fail with proper message - if result['ret'] is True: - if result.get('warning'): - module.warn(to_native(result['warning'])) - - module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py b/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py deleted file mode 100644 index c0576ff4..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py +++ /dev/null @@ -1,494 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_info -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - get information back. - - Information retrieved is placed in a location specified by the user. - - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)! 
-options: - category: - required: false - description: - - List of categories to execute on OOB controller - default: ['Systems'] - type: list - elements: str - command: - required: false - description: - - List of commands to execute on OOB controller - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller - type: str - username: - description: - - User for authentication with OOB controller - type: str - password: - description: - - Password for authentication with OOB controller - type: str - auth_token: - description: - - Security token for authentication with OOB controller - type: str - version_added: 2.3.0 - timeout: - description: - - Timeout in seconds for URL requests to OOB controller - default: 10 - type: int - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Get CPU inventory - community.general.redfish_info: - category: Systems - command: GetCpuInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" - - - name: Get CPU model - community.general.redfish_info: - category: Systems - command: GetCpuInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" - - - name: Get memory inventory - community.general.redfish_info: - category: Systems - command: GetMemoryInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Get fan inventory with a timeout of 20 seconds - community.general.redfish_info: - category: Chassis - command: GetFanInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result - - - name: Get Virtual Media information - community.general.redfish_info: - category: Manager - command: GetVirtualMedia - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" - - - name: Get Volume Inventory - community.general.redfish_info: - category: Systems - command: GetVolumeInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" - - - name: Get Session information - community.general.redfish_info: - category: Sessions - command: GetSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" - - - name: Get default inventory information - community.general.redfish_info: - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts | to_nice_json }}" - - - name: Get several inventories - community.general.redfish_info: - category: Systems - command: GetNicInventory,GetBiosAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ 
password }}" - - - name: Get default system inventory and user information - community.general.redfish_info: - category: Systems,Accounts - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get default system, user and firmware information - community.general.redfish_info: - category: ["Systems", "Accounts", "Update"] - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get Manager NIC inventory information - community.general.redfish_info: - category: Manager - command: GetManagerNicInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get boot override information - community.general.redfish_info: - category: Systems - command: GetBootOverride - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get chassis inventory - community.general.redfish_info: - category: Chassis - command: GetChassisInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get all information available in the Manager category - community.general.redfish_info: - category: Manager - command: all - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get firmware update capability information - community.general.redfish_info: - category: Update - command: GetFirmwareUpdateCapabilities - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get firmware inventory - community.general.redfish_info: - category: Update - command: GetFirmwareInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get software inventory - community.general.redfish_info: - category: Update - command: GetSoftwareInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get Manager Services - community.general.redfish_info: - category: Manager - command: GetNetworkProtocols - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get all information available in all categories - community.general.redfish_info: - category: all - command: all - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get system health report - community.general.redfish_info: - category: Systems - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get chassis health report - community.general.redfish_info: - category: Chassis - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get manager health report - community.general.redfish_info: - category: Manager - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get manager Redfish Host Interface inventory - community.general.redfish_info: - category: Manager - command: GetHostInterfaces - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' - -RETURN = ''' -result: - description: different results depending on task - returned: always - type: dict - sample: List of CPUs on system -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils - -CATEGORY_COMMANDS_ALL = { - "Systems": 
["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", - "GetMemoryInventory", "GetNicInventory", "GetHealthReport", - "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", - "GetBiosAttributes", "GetBootOrder", "GetBootOverride"], - "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", - "GetChassisThermals", "GetChassisInventory", "GetHealthReport"], - "Accounts": ["ListUsers"], - "Sessions": ["GetSessions"], - "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"], - "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", - "GetHealthReport", "GetHostInterfaces"], -} - -CATEGORY_COMMANDS_DEFAULT = { - "Systems": "GetSystemInventory", - "Chassis": "GetFanInventory", - "Accounts": "ListUsers", - "Update": "GetFirmwareInventory", - "Sessions": "GetSessions", - "Manager": "GetManagerNicInventory" -} - - -def main(): - result = {} - category_list = [] - module = AnsibleModule( - argument_spec=dict( - category=dict(type='list', elements='str', default=['Systems']), - command=dict(type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=True, - ) - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module) - - # Build Category list - if "all" in module.params['category']: - for entry in CATEGORY_COMMANDS_ALL: - category_list.append(entry) - else: - # one or more categories specified - category_list = module.params['category'] - - for category in category_list: - command_list = [] - # Build Command list for each Category - if category in CATEGORY_COMMANDS_ALL: - if not module.params['command']: - # True if we don't specify a command --> use default - command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) - elif "all" in module.params['command']: - for entry in range(len(CATEGORY_COMMANDS_ALL[category])): - command_list.append(CATEGORY_COMMANDS_ALL[category][entry]) - # one or more commands - else: - command_list = module.params['command'] - # Verify that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg="Invalid Command: %s" % cmd) - else: - # Fail if even one category given is invalid - module.fail_json(msg="Invalid Category: %s" % category) - - # Organize by Categories / Commands - if category == "Systems": - # execute only if we find a Systems resource - resource = rf_utils._find_systems_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetSystemInventory": - result["system"] = rf_utils.get_multi_system_inventory() - elif command == "GetCpuInventory": - result["cpu"] = rf_utils.get_multi_cpu_inventory() - elif command == "GetMemoryInventory": - result["memory"] = rf_utils.get_multi_memory_inventory() - elif command == "GetNicInventory": - result["nic"] = 
rf_utils.get_multi_nic_inventory(category) - elif command == "GetStorageControllerInventory": - result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory() - elif command == "GetDiskInventory": - result["disk"] = rf_utils.get_multi_disk_inventory() - elif command == "GetVolumeInventory": - result["volume"] = rf_utils.get_multi_volume_inventory() - elif command == "GetBiosAttributes": - result["bios_attribute"] = rf_utils.get_multi_bios_attributes() - elif command == "GetBootOrder": - result["boot_order"] = rf_utils.get_multi_boot_order() - elif command == "GetBootOverride": - result["boot_override"] = rf_utils.get_multi_boot_override() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_system_health_report() - - elif category == "Chassis": - # execute only if we find Chassis resource - resource = rf_utils._find_chassis_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetFanInventory": - result["fan"] = rf_utils.get_fan_inventory() - elif command == "GetPsuInventory": - result["psu"] = rf_utils.get_psu_inventory() - elif command == "GetChassisThermals": - result["thermals"] = rf_utils.get_chassis_thermals() - elif command == "GetChassisPower": - result["chassis_power"] = rf_utils.get_chassis_power() - elif command == "GetChassisInventory": - result["chassis"] = rf_utils.get_chassis_inventory() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_chassis_health_report() - - elif category == "Accounts": - # execute only if we find an Account service resource - resource = rf_utils._find_accountservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "ListUsers": - result["user"] = rf_utils.list_users() - - elif category == "Update": - # execute only if we find UpdateService resources - resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetFirmwareInventory": - result["firmware"] = rf_utils.get_firmware_inventory() - elif command == "GetSoftwareInventory": - result["software"] = rf_utils.get_software_inventory() - elif command == "GetFirmwareUpdateCapabilities": - result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities() - - elif category == "Sessions": - # execute only if we find SessionService resources - resource = rf_utils._find_sessionservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetSessions": - result["session"] = rf_utils.get_sessions() - - elif category == "Manager": - # execute only if we find a Manager service resource - resource = rf_utils._find_managers_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetManagerNicInventory": - result["manager_nics"] = rf_utils.get_multi_nic_inventory(category) - elif command == "GetVirtualMedia": - result["virtual_media"] = rf_utils.get_multi_virtualmedia() - elif command == "GetLogs": - result["log"] = rf_utils.get_logs() - elif command == "GetNetworkProtocols": - result["network_protocols"] = rf_utils.get_network_protocols() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_manager_health_report() - elif command == "GetHostInterfaces": - 
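                    # Dispatch pattern: each command string maps onto one RedfishUtils
                    # helper, and its result is stored under a resource-type key; all
                    # facts gathered across categories are returned together via
                    # exit_json(redfish_facts=result) below.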
result["host_interfaces"] = rf_utils.get_hostinterfaces() - - # Return data back - module.exit_json(redfish_facts=result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py b/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py deleted file mode 100644 index 725e070c..00000000 --- a/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: wakeonlan -short_description: Send a magic Wake-on-LAN (WoL) broadcast packet -description: -- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets. -options: - mac: - description: - - MAC address to send Wake-on-LAN broadcast packet for. - required: true - type: str - broadcast: - description: - - Network broadcast address to use for broadcasting magic Wake-on-LAN packet. - default: 255.255.255.255 - type: str - port: - description: - - UDP port to use for magic Wake-on-LAN packet. - default: 7 - type: int -todo: - - Add arping support to check whether the system is up (before and after) - - Enable check-mode support (when we have arping support) - - Does not have SecureOn password support -notes: - - This module sends a magic packet, without knowing whether it worked - - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS) - - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first). -seealso: -- module: community.windows.win_wakeonlan -author: -- Dag Wieers (@dagwieers) -''' - -EXAMPLES = r''' -- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66 - community.general.wakeonlan: - mac: '00:00:5E:00:53:66' - broadcast: 192.0.2.23 - delegate_to: localhost - -- community.general.wakeonlan: - mac: 00:00:5E:00:53:66 - port: 9 - delegate_to: localhost -''' - -RETURN = r''' -# Default return values -''' -import socket -import struct -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -def wakeonlan(module, mac, broadcast, port): - """ Send a magic Wake-on-LAN packet. 
""" - - mac_orig = mac - - # Remove possible separator from MAC address - if len(mac) == 12 + 5: - mac = mac.replace(mac[2], '') - - # If we don't end up with 12 hexadecimal characters, fail - if len(mac) != 12: - module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig) - - # Test if it converts to an integer, otherwise fail - try: - int(mac, 16) - except ValueError: - module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig) - - # Create payload for magic packet - data = b'' - padding = ''.join(['FFFFFFFFFFFF', mac * 20]) - for i in range(0, len(padding), 2): - data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))]) - - # Broadcast payload to network - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - - if not module.check_mode: - - try: - sock.sendto(data, (broadcast, port)) - except socket.error as e: - sock.close() - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - sock.close() - - -def main(): - module = AnsibleModule( - argument_spec=dict( - mac=dict(type='str', required=True), - broadcast=dict(type='str', default='255.255.255.255'), - port=dict(type='int', default=7), - ), - supports_check_mode=True, - ) - - mac = module.params['mac'] - broadcast = module.params['broadcast'] - port = module.params['port'] - - wakeonlan(module, mac, broadcast, port) - - module.exit_json(changed=True) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/rhevm.py b/ansible_collections/community/general/plugins/modules/rhevm.py deleted file mode 120000 index 0625626f..00000000 --- a/ansible_collections/community/general/plugins/modules/rhevm.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/rhevm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rhn_channel.py b/ansible_collections/community/general/plugins/modules/rhn_channel.py deleted file mode 120000 index 29fd47cf..00000000 --- a/ansible_collections/community/general/plugins/modules/rhn_channel.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/rhn_channel.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rhn_register.py b/ansible_collections/community/general/plugins/modules/rhn_register.py deleted file mode 120000 index d3161422..00000000 --- a/ansible_collections/community/general/plugins/modules/rhn_register.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/rhn_register.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rhsm_release.py b/ansible_collections/community/general/plugins/modules/rhsm_release.py deleted file mode 120000 index b7af986c..00000000 --- a/ansible_collections/community/general/plugins/modules/rhsm_release.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/rhsm_release.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rhsm_repository.py b/ansible_collections/community/general/plugins/modules/rhsm_repository.py deleted file mode 120000 index 68784d5a..00000000 --- a/ansible_collections/community/general/plugins/modules/rhsm_repository.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/rhsm_repository.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/riak.py b/ansible_collections/community/general/plugins/modules/riak.py deleted file mode 120000 index d1b4eaa0..00000000 --- a/ansible_collections/community/general/plugins/modules/riak.py +++ 
/dev/null @@ -1 +0,0 @@ -database/misc/riak.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rocketchat.py b/ansible_collections/community/general/plugins/modules/rocketchat.py deleted file mode 120000 index 82314f3e..00000000 --- a/ansible_collections/community/general/plugins/modules/rocketchat.py +++ /dev/null @@ -1 +0,0 @@ -notification/rocketchat.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rollbar_deployment.py b/ansible_collections/community/general/plugins/modules/rollbar_deployment.py deleted file mode 120000 index 7ed44943..00000000 --- a/ansible_collections/community/general/plugins/modules/rollbar_deployment.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/rollbar_deployment.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py b/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py deleted file mode 120000 index 665ad0ab..00000000 --- a/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/rpm_ostree_pkg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py b/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py deleted file mode 120000 index b6e6096f..00000000 --- a/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/rundeck_acl_policy.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py b/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py deleted file mode 120000 index 2a274d6f..00000000 --- a/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/rundeck_job_executions_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rundeck_job_run.py b/ansible_collections/community/general/plugins/modules/rundeck_job_run.py deleted file mode 120000 index 4f7127fd..00000000 --- a/ansible_collections/community/general/plugins/modules/rundeck_job_run.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/rundeck_job_run.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/rundeck_project.py b/ansible_collections/community/general/plugins/modules/rundeck_project.py deleted file mode 120000 index f3f9f3e5..00000000 --- a/ansible_collections/community/general/plugins/modules/rundeck_project.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/rundeck_project.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/runit.py b/ansible_collections/community/general/plugins/modules/runit.py deleted file mode 120000 index 1418a668..00000000 --- a/ansible_collections/community/general/plugins/modules/runit.py +++ /dev/null @@ -1 +0,0 @@ -system/runit.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py b/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py deleted file mode 120000 index c27ac0a6..00000000 --- a/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py +++ /dev/null @@ -1 +0,0 @@ -system/sap_task_list_execute.py \ No newline at end of file diff --git 
a/ansible_collections/community/general/plugins/modules/sapcar_extract.py b/ansible_collections/community/general/plugins/modules/sapcar_extract.py deleted file mode 120000 index 140916bb..00000000 --- a/ansible_collections/community/general/plugins/modules/sapcar_extract.py +++ /dev/null @@ -1 +0,0 @@ -files/sapcar_extract.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/say.py b/ansible_collections/community/general/plugins/modules/say.py deleted file mode 120000 index 04301ded..00000000 --- a/ansible_collections/community/general/plugins/modules/say.py +++ /dev/null @@ -1 +0,0 @@ -notification/say.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_compute.py b/ansible_collections/community/general/plugins/modules/scaleway_compute.py deleted file mode 120000 index d3151f68..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_compute.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_compute.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py b/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py deleted file mode 120000 index 6681f78c..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_database_backup.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_image_info.py b/ansible_collections/community/general/plugins/modules/scaleway_image_info.py deleted file mode 120000 index 5a976783..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_image_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_image_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_ip.py b/ansible_collections/community/general/plugins/modules/scaleway_ip.py deleted file mode 120000 index abf9da82..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_ip.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_ip.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py b/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py deleted file mode 120000 index e8d44165..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_ip_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_lb.py b/ansible_collections/community/general/plugins/modules/scaleway_lb.py deleted file mode 120000 index a84770de..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_lb.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_lb.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py b/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py deleted file mode 120000 index 37bb1154..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_organization_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_private_network.py 
b/ansible_collections/community/general/plugins/modules/scaleway_private_network.py deleted file mode 120000 index b35eef41..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_private_network.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_private_network.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group.py deleted file mode 120000 index c196ced0..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_security_group.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_security_group.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py deleted file mode 120000 index 04617771..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_security_group_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py deleted file mode 120000 index 02966b71..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_security_group_rule.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_server_info.py b/ansible_collections/community/general/plugins/modules/scaleway_server_info.py deleted file mode 120000 index f3c83aa8..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_server_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_server_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py b/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py deleted file mode 120000 index 7bd7072b..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_snapshot_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py b/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py deleted file mode 120000 index 24d24f3a..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_sshkey.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_user_data.py b/ansible_collections/community/general/plugins/modules/scaleway_user_data.py deleted file mode 120000 index 4ee78a0a..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_user_data.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_user_data.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_volume.py b/ansible_collections/community/general/plugins/modules/scaleway_volume.py deleted file mode 120000 index cfb61a3f..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_volume.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_volume.py \ No newline at 
end of file diff --git a/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py b/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py deleted file mode 120000 index 893e868b..00000000 --- a/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/scaleway/scaleway_volume_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sefcontext.py b/ansible_collections/community/general/plugins/modules/sefcontext.py deleted file mode 120000 index 776347fc..00000000 --- a/ansible_collections/community/general/plugins/modules/sefcontext.py +++ /dev/null @@ -1 +0,0 @@ -system/sefcontext.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/selinux_permissive.py b/ansible_collections/community/general/plugins/modules/selinux_permissive.py deleted file mode 120000 index 3668b6c6..00000000 --- a/ansible_collections/community/general/plugins/modules/selinux_permissive.py +++ /dev/null @@ -1 +0,0 @@ -system/selinux_permissive.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/selogin.py b/ansible_collections/community/general/plugins/modules/selogin.py deleted file mode 120000 index 616c1b1c..00000000 --- a/ansible_collections/community/general/plugins/modules/selogin.py +++ /dev/null @@ -1 +0,0 @@ -system/selogin.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sendgrid.py b/ansible_collections/community/general/plugins/modules/sendgrid.py deleted file mode 120000 index e1488472..00000000 --- a/ansible_collections/community/general/plugins/modules/sendgrid.py +++ /dev/null @@ -1 +0,0 @@ -notification/sendgrid.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sensu_check.py b/ansible_collections/community/general/plugins/modules/sensu_check.py deleted file mode 120000 index e5b454d8..00000000 --- a/ansible_collections/community/general/plugins/modules/sensu_check.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/sensu/sensu_check.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sensu_client.py b/ansible_collections/community/general/plugins/modules/sensu_client.py deleted file mode 120000 index c4a060bf..00000000 --- a/ansible_collections/community/general/plugins/modules/sensu_client.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/sensu/sensu_client.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sensu_handler.py b/ansible_collections/community/general/plugins/modules/sensu_handler.py deleted file mode 120000 index fc3c2fbe..00000000 --- a/ansible_collections/community/general/plugins/modules/sensu_handler.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/sensu/sensu_handler.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sensu_silence.py b/ansible_collections/community/general/plugins/modules/sensu_silence.py deleted file mode 120000 index a1ff99b5..00000000 --- a/ansible_collections/community/general/plugins/modules/sensu_silence.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/sensu/sensu_silence.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sensu_subscription.py b/ansible_collections/community/general/plugins/modules/sensu_subscription.py deleted file mode 120000 index 0e0082be..00000000 --- 
a/ansible_collections/community/general/plugins/modules/sensu_subscription.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/sensu/sensu_subscription.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/seport.py b/ansible_collections/community/general/plugins/modules/seport.py deleted file mode 120000 index 29b181da..00000000 --- a/ansible_collections/community/general/plugins/modules/seport.py +++ /dev/null @@ -1 +0,0 @@ -system/seport.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/serverless.py b/ansible_collections/community/general/plugins/modules/serverless.py deleted file mode 120000 index 606c0c31..00000000 --- a/ansible_collections/community/general/plugins/modules/serverless.py +++ /dev/null @@ -1 +0,0 @@ -cloud/misc/serverless.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/shutdown.py b/ansible_collections/community/general/plugins/modules/shutdown.py deleted file mode 120000 index 503b1ec0..00000000 --- a/ansible_collections/community/general/plugins/modules/shutdown.py +++ /dev/null @@ -1 +0,0 @@ -system/shutdown.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sl_vm.py b/ansible_collections/community/general/plugins/modules/sl_vm.py deleted file mode 120000 index 3ca3caf3..00000000 --- a/ansible_collections/community/general/plugins/modules/sl_vm.py +++ /dev/null @@ -1 +0,0 @@ -cloud/softlayer/sl_vm.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/slack.py b/ansible_collections/community/general/plugins/modules/slack.py deleted file mode 120000 index ca031027..00000000 --- a/ansible_collections/community/general/plugins/modules/slack.py +++ /dev/null @@ -1 +0,0 @@ -notification/slack.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/slackpkg.py b/ansible_collections/community/general/plugins/modules/slackpkg.py deleted file mode 120000 index 71b9d35f..00000000 --- a/ansible_collections/community/general/plugins/modules/slackpkg.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/slackpkg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/smartos_image_info.py b/ansible_collections/community/general/plugins/modules/smartos_image_info.py deleted file mode 120000 index 5146feee..00000000 --- a/ansible_collections/community/general/plugins/modules/smartos_image_info.py +++ /dev/null @@ -1 +0,0 @@ -cloud/smartos/smartos_image_info.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/snap.py b/ansible_collections/community/general/plugins/modules/snap.py deleted file mode 120000 index 414be228..00000000 --- a/ansible_collections/community/general/plugins/modules/snap.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/snap.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/snap_alias.py b/ansible_collections/community/general/plugins/modules/snap_alias.py deleted file mode 120000 index 6f8c2d5f..00000000 --- a/ansible_collections/community/general/plugins/modules/snap_alias.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/snap_alias.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/snmp_facts.py b/ansible_collections/community/general/plugins/modules/snmp_facts.py deleted file mode 120000 index 5276bf27..00000000 --- 
a/ansible_collections/community/general/plugins/modules/snmp_facts.py +++ /dev/null @@ -1 +0,0 @@ -net_tools/snmp_facts.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/solaris_zone.py b/ansible_collections/community/general/plugins/modules/solaris_zone.py deleted file mode 120000 index f6878f7b..00000000 --- a/ansible_collections/community/general/plugins/modules/solaris_zone.py +++ /dev/null @@ -1 +0,0 @@ -system/solaris_zone.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sorcery.py b/ansible_collections/community/general/plugins/modules/sorcery.py deleted file mode 120000 index 06dda3c1..00000000 --- a/ansible_collections/community/general/plugins/modules/sorcery.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/sorcery.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py b/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py deleted file mode 100644 index 6451d729..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: bitbucket_access_key -short_description: Manages Bitbucket repository access keys -description: - - Manages Bitbucket repository access keys (also called deploy keys). -author: - - Evgeniy Krysanov (@catcombo) -extends_documentation_fragment: - - community.general.bitbucket -options: - repository: - description: - - The repository name. - type: str - required: true - workspace: - description: - - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. - type: str - required: true - aliases: [ username ] - key: - description: - - The SSH public key. - type: str - label: - description: - - The key label. - type: str - required: true - state: - description: - - Indicates desired state of the access key. - type: str - required: true - choices: [ absent, present ] -notes: - - Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories. - - Check mode is supported. 
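  - Bitbucket does not allow updating a deploy key in place. When the key behind an existing label changes, the module deletes the old key and creates a new one under the same label, so a key rotation shows up as a single changed task.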
-''' - -EXAMPLES = r''' -- name: Create access key - community.general.bitbucket_access_key: - repository: 'bitbucket-repo' - workspace: bitbucket_workspace - key: '{{lookup("file", "bitbucket.pub") }}' - label: 'Bitbucket' - state: present - -- name: Delete access key - community.general.bitbucket_access_key: - repository: bitbucket-repo - workspace: bitbucket_workspace - label: Bitbucket - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper - -error_messages = { - 'required_key': '`key` is required when the `state` is `present`', - 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository', - 'invalid_workspace_or_repo': 'Invalid `repository` or `workspace`', - 'invalid_key': 'Invalid SSH key or key is already in use', -} - -BITBUCKET_API_ENDPOINTS = { - 'deploy-key-list': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL, - 'deploy-key-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL, -} - - -def get_existing_deploy_key(module, bitbucket): - """ - Search for an existing deploy key on Bitbucket - with the label specified in module param `label` - - :param module: instance of the :class:`AnsibleModule` - :param bitbucket: instance of the :class:`BitbucketHelper` - :return: existing deploy key or None if not found - :rtype: dict or None - - Return example:: - - { - "id": 123, - "label": "mykey", - "created_on": "2019-03-23T10:15:21.517377+00:00", - "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", - "type": "deploy_key", - "comment": "", - "last_used": None, - "repository": { - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" - }, - "html": { - "href": "https://bitbucket.org/mleu/test" - }, - "avatar": { - "href": "..." 
- } - }, - "type": "repository", - "name": "test", - "full_name": "mleu/test", - "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" - }, - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" - } - }, - } - """ - content = { - 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ) - } - - # Look through the all response pages in search of deploy key we need - while 'next' in content: - info, content = bitbucket.request( - api_url=content['next'], - method='GET', - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) - - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) - - if info['status'] != 200: - module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) - - res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None) - - if res is not None: - return res - - return None - - -def create_deploy_key(module, bitbucket): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='POST', - data={ - 'key': module.params['key'], - 'label': module.params['label'], - }, - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) - - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) - - if info['status'] == 400: - module.fail_json(msg=error_messages['invalid_key']) - - if info['status'] != 200: - module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format( - label=module.params['label'], - info=info, - )) - - -def delete_deploy_key(module, bitbucket, key_id): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - key_id=key_id, - ), - method='DELETE', - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) - - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) - - if info['status'] != 204: - module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format( - label=module.params['label'], - info=info, - )) - - -def main(): - argument_spec = BitbucketHelper.bitbucket_argument_spec() - argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), - key=dict(type='str', no_log=False), - label=dict(type='str', required=True), - state=dict(type='str', choices=['present', 'absent'], required=True), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=BitbucketHelper.bitbucket_required_one_of(), - required_together=BitbucketHelper.bitbucket_required_together(), - ) - - bitbucket = BitbucketHelper(module) - - key = module.params['key'] - state = module.params['state'] - - # Check parameters - if (key is None) and (state == 'present'): - module.fail_json(msg=error_messages['required_key']) - - # Retrieve access token for authorized API requests - bitbucket.fetch_access_token() - - # Retrieve existing deploy key (if any) - existing_deploy_key = 
get_existing_deploy_key(module, bitbucket) - changed = False - - # Create new deploy key in case it doesn't exists - if not existing_deploy_key and (state == 'present'): - if not module.check_mode: - create_deploy_key(module, bitbucket) - changed = True - - # Update deploy key if the old value does not match the new one - elif existing_deploy_key and (state == 'present'): - if not key.startswith(existing_deploy_key.get('key')): - if not module.check_mode: - # Bitbucket doesn't support update key for the same label, - # so we need to delete the old one first - delete_deploy_key(module, bitbucket, existing_deploy_key['id']) - create_deploy_key(module, bitbucket) - changed = True - - # Delete deploy key - elif existing_deploy_key and (state == 'absent'): - if not module.check_mode: - delete_deploy_key(module, bitbucket, existing_deploy_key['id']) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py b/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py deleted file mode 100644 index 5d42419d..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: bitbucket_pipeline_key_pair -short_description: Manages Bitbucket pipeline SSH key pair -description: - - Manages Bitbucket pipeline SSH key pair. -author: - - Evgeniy Krysanov (@catcombo) -extends_documentation_fragment: - - community.general.bitbucket -options: - repository: - description: - - The repository name. - type: str - required: true - workspace: - description: - - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. - type: str - required: true - aliases: [ username ] - public_key: - description: - - The public key. - type: str - private_key: - description: - - The private key. - type: str - state: - description: - - Indicates desired state of the key pair. - type: str - required: true - choices: [ absent, present ] -notes: - - Check mode is supported. 
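  - Only I(public_key) is compared against the stored pair (the API never returns the private half), so rotating the private key while keeping the same public key is not detected as a change.
  - A fresh pair can be generated locally, for example with C(ssh-keygen -t rsa -b 4096 -f bitbucket -N "").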
-''' - -EXAMPLES = r''' -- name: Create or update SSH key pair - community.general.bitbucket_pipeline_key_pair: - repository: 'bitbucket-repo' - workspace: bitbucket_workspace - public_key: '{{lookup("file", "bitbucket.pub") }}' - private_key: '{{lookup("file", "bitbucket") }}' - state: present - -- name: Remove SSH key pair - community.general.bitbucket_pipeline_key_pair: - repository: bitbucket-repo - workspace: bitbucket_workspace - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper - -error_messages = { - 'invalid_params': 'Account, repository or SSH key pair was not found', - 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`', -} - -BITBUCKET_API_ENDPOINTS = { - 'ssh-key-pair': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL, -} - - -def get_existing_ssh_key_pair(module, bitbucket): - """ - Retrieves an existing ssh key pair from repository - specified in module param `repository` - - :param module: instance of the :class:`AnsibleModule` - :param bitbucket: instance of the :class:`BitbucketHelper` - :return: existing key pair or None if not found - :rtype: dict or None - - Return example:: - - { - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT", - "type": "pipeline_ssh_key_pair" - } - """ - api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ) - - info, content = bitbucket.request( - api_url=api_url, - method='GET', - ) - - if info['status'] == 404: - # Account, repository or SSH key pair was not found. 
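        # A 404 from this endpoint covers all three cases above, so the absence
        # of a key pair is deliberately reported as None rather than a failure;
        # main() then decides whether a pair has to be created.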
- return None - - return content - - -def update_ssh_key_pair(module, bitbucket): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='PUT', - data={ - 'private_key': module.params['private_key'], - 'public_key': module.params['public_key'], - }, - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) - - if info['status'] != 200: - module.fail_json(msg='Failed to create or update pipeline ssh key pair : {0}'.format(info)) - - -def delete_ssh_key_pair(module, bitbucket): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='DELETE', - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) - - if info['status'] != 204: - module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info)) - - -def main(): - argument_spec = BitbucketHelper.bitbucket_argument_spec() - argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), - public_key=dict(type='str'), - private_key=dict(type='str', no_log=True), - state=dict(type='str', choices=['present', 'absent'], required=True), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=BitbucketHelper.bitbucket_required_one_of(), - required_together=BitbucketHelper.bitbucket_required_together(), - ) - - bitbucket = BitbucketHelper(module) - - state = module.params['state'] - public_key = module.params['public_key'] - private_key = module.params['private_key'] - - # Check parameters - if ((public_key is None) or (private_key is None)) and (state == 'present'): - module.fail_json(msg=error_messages['required_keys']) - - # Retrieve access token for authorized API requests - bitbucket.fetch_access_token() - - # Retrieve existing ssh key - key_pair = get_existing_ssh_key_pair(module, bitbucket) - changed = False - - # Create or update key pair - if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'): - if not module.check_mode: - update_ssh_key_pair(module, bitbucket) - changed = True - - # Delete key pair - elif key_pair and (state == 'absent'): - if not module.check_mode: - delete_ssh_key_pair(module, bitbucket) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py b/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py deleted file mode 100644 index 9f4f2b94..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: bitbucket_pipeline_known_host -short_description: Manages Bitbucket pipeline known hosts -description: - 
- Manages Bitbucket pipeline known hosts under the "SSH Keys" menu. - - The host fingerprint will be retrieved automatically, but in case of an error, one can use I(key) field to specify it manually. -author: - - Evgeniy Krysanov (@catcombo) -extends_documentation_fragment: - - community.general.bitbucket -requirements: - - paramiko -options: - repository: - description: - - The repository name. - type: str - required: true - workspace: - description: - - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. - type: str - required: true - aliases: [ username ] - name: - description: - - The FQDN of the known host. - type: str - required: true - key: - description: - - The public key. - type: str - state: - description: - - Indicates desired state of the record. - type: str - required: true - choices: [ absent, present ] -notes: - - Check mode is supported. -''' - -EXAMPLES = r''' -- name: Create known hosts from the list - community.general.bitbucket_pipeline_known_host: - repository: 'bitbucket-repo' - workspace: bitbucket_workspace - name: '{{ item }}' - state: present - with_items: - - bitbucket.org - - example.com - -- name: Remove known host - community.general.bitbucket_pipeline_known_host: - repository: bitbucket-repo - workspace: bitbucket_workspace - name: bitbucket.org - state: absent - -- name: Specify public key file - community.general.bitbucket_pipeline_known_host: - repository: bitbucket-repo - workspace: bitbucket_workspace - name: bitbucket.org - key: '{{lookup("file", "bitbucket.pub") }}' - state: absent -''' - -RETURN = r''' # ''' - -import socket - -try: - import paramiko - HAS_PARAMIKO = True -except ImportError: - HAS_PARAMIKO = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper - -error_messages = { - 'invalid_params': 'Account or repository was not found', - 'unknown_key_type': 'Public key type is unknown', -} - -BITBUCKET_API_ENDPOINTS = { - 'known-host-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL, - 'known-host-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL, -} - - -def get_existing_known_host(module, bitbucket): - """ - Search for a host in Bitbucket pipelines known hosts - with the name specified in module param `name` - - :param module: instance of the :class:`AnsibleModule` - :param bitbucket: instance of the :class:`BitbucketHelper` - :return: existing host or None if not found - :rtype: dict or None - - Return example:: - - { - 'type': 'pipeline_known_host', - 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}' - 'hostname': 'bitbucket.org', - 'public_key': { - 'type': 'pipeline_ssh_public_key', - 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40', - 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A', - 'key_type': 'ssh-rsa', - 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==' - }, - } - """ - content = { - 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ) - } - - # Look through all response pages in search of hostname we need - while 'next' in content: - info, content = bitbucket.request( - api_url=content['next'], - 
method='GET', - ) - - if info['status'] == 404: - module.fail_json(msg='Invalid `repository` or `workspace`.') - - if info['status'] != 200: - module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info)) - - host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None) - - if host is not None: - return host - - return None - - -def get_host_key(module, hostname): - """ - Fetches public key for specified host - - :param module: instance of the :class:`AnsibleModule` - :param hostname: host name - :return: key type and key content - :rtype: tuple - - Return example:: - - ( - 'ssh-rsa', - 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==', - ) - """ - try: - sock = socket.socket() - sock.connect((hostname, 22)) - except socket.error: - module.fail_json(msg='Error opening socket to {0}'.format(hostname)) - - try: - trans = paramiko.transport.Transport(sock) - trans.start_client() - host_key = trans.get_remote_server_key() - except paramiko.SSHException: - module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname)) - - trans.close() - sock.close() - - key_type = host_key.get_name() - key = host_key.get_base64() - - return key_type, key - - -def create_known_host(module, bitbucket): - hostname = module.params['name'] - key_param = module.params['key'] - - if key_param is None: - key_type, key = get_host_key(module, hostname) - elif ' ' in key_param: - key_type, key = key_param.split(' ', 1) - else: - module.fail_json(msg=error_messages['unknown_key_type']) - - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='POST', - data={ - 'hostname': hostname, - 'public_key': { - 'key_type': key_type, - 'key': key, - } - }, - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) - - if info['status'] != 201: - module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format( - hostname=module.params['name'], - info=info, - )) - - -def delete_known_host(module, bitbucket, known_host_uuid): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - known_host_uuid=known_host_uuid, - ), - method='DELETE', - ) - - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) - - if info['status'] != 204: - module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format( - hostname=module.params['name'], - info=info, - )) - - -def main(): - argument_spec = BitbucketHelper.bitbucket_argument_spec() - argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), - name=dict(type='str', required=True), - key=dict(type='str', no_log=False), - state=dict(type='str', choices=['present', 'absent'], required=True), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=BitbucketHelper.bitbucket_required_one_of(), - required_together=BitbucketHelper.bitbucket_required_together(), - ) - - if (module.params['key'] is None) and (not HAS_PARAMIKO): - module.fail_json(msg='`paramiko` package not found, please install it.') - - bitbucket = BitbucketHelper(module) - - # 
Retrieve access token for authorized API requests - bitbucket.fetch_access_token() - - # Retrieve existing known host - existing_host = get_existing_known_host(module, bitbucket) - state = module.params['state'] - changed = False - - # Create new host in case it doesn't exists - if not existing_host and (state == 'present'): - if not module.check_mode: - create_known_host(module, bitbucket) - changed = True - - # Delete host - elif existing_host and (state == 'absent'): - if not module.check_mode: - delete_known_host(module, bitbucket, existing_host['uuid']) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py b/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py deleted file mode 100644 index e5701184..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py +++ /dev/null @@ -1,273 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: bitbucket_pipeline_variable -short_description: Manages Bitbucket pipeline variables -description: - - Manages Bitbucket pipeline variables. -author: - - Evgeniy Krysanov (@catcombo) -extends_documentation_fragment: - - community.general.bitbucket -options: - repository: - description: - - The repository name. - type: str - required: true - workspace: - description: - - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. - type: str - required: true - aliases: [ username ] - name: - description: - - The pipeline variable name. - type: str - required: true - value: - description: - - The pipeline variable value. - type: str - secured: - description: - - Whether to encrypt the variable value. - type: bool - default: no - state: - description: - - Indicates desired state of the variable. - type: str - required: true - choices: [ absent, present ] -notes: - - Check mode is supported. - - For secured values return parameter C(changed) is always C(True). 
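  - The API never returns the value of a secured variable, so the module has nothing to compare the requested value against and re-submits it on every run, which is why C(changed) is always C(True) for secured values.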
-''' - -EXAMPLES = r''' -- name: Create or update pipeline variables from the list - community.general.bitbucket_pipeline_variable: - repository: 'bitbucket-repo' - workspace: bitbucket_workspace - name: '{{ item.name }}' - value: '{{ item.value }}' - secured: '{{ item.secured }}' - state: present - with_items: - - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: False } - - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: True } - -- name: Remove pipeline variable - community.general.bitbucket_pipeline_variable: - repository: bitbucket-repo - workspace: bitbucket_workspace - name: AWS_ACCESS_KEY - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule, _load_params -from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper - -error_messages = { - 'required_value': '`value` is required when the `state` is `present`', -} - -BITBUCKET_API_ENDPOINTS = { - 'pipeline-variable-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL, - 'pipeline-variable-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL, -} - - -def get_existing_pipeline_variable(module, bitbucket): - """ - Search for a pipeline variable - - :param module: instance of the :class:`AnsibleModule` - :param bitbucket: instance of the :class:`BitbucketHelper` - :return: existing variable or None if not found - :rtype: dict or None - - Return example:: - - { - 'name': 'AWS_ACCESS_OBKEY_ID', - 'value': 'x7HU80-a2', - 'type': 'pipeline_variable', - 'secured': False, - 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}' - } - - The `value` key in dict is absent in case of secured variable. 
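    The list endpoint is paged: the helper below walks it one page at a time
    via the ``?page=N`` query parameter and gives up with None once an empty
    page (``pagelen == 0``) comes back, for example::

        https://api.bitbucket.org/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/?page=2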
- """ - variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ) - # Look through the all response pages in search of variable we need - page = 1 - while True: - next_url = "%s?page=%s" % (variables_base_url, page) - info, content = bitbucket.request( - api_url=next_url, - method='GET', - ) - - if info['status'] == 404: - module.fail_json(msg='Invalid `repository` or `workspace`.') - - if info['status'] != 200: - module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info)) - - # We are at the end of list - if 'pagelen' in content and content['pagelen'] == 0: - return None - - page += 1 - var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None) - - if var is not None: - var['name'] = var.pop('key') - return var - - -def create_pipeline_variable(module, bitbucket): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - ), - method='POST', - data={ - 'key': module.params['name'], - 'value': module.params['value'], - 'secured': module.params['secured'], - }, - ) - - if info['status'] != 201: - module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format( - name=module.params['name'], - info=info, - )) - - -def update_pipeline_variable(module, bitbucket, variable_uuid): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - variable_uuid=variable_uuid, - ), - method='PUT', - data={ - 'value': module.params['value'], - 'secured': module.params['secured'], - }, - ) - - if info['status'] != 200: - module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format( - name=module.params['name'], - info=info, - )) - - -def delete_pipeline_variable(module, bitbucket, variable_uuid): - info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], - variable_uuid=variable_uuid, - ), - method='DELETE', - ) - - if info['status'] != 204: - module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format( - name=module.params['name'], - info=info, - )) - - -class BitBucketPipelineVariable(AnsibleModule): - def __init__(self, *args, **kwargs): - params = _load_params() or {} - if params.get('secured'): - kwargs['argument_spec']['value'].update({'no_log': True}) - super(BitBucketPipelineVariable, self).__init__(*args, **kwargs) - - -def main(): - argument_spec = BitbucketHelper.bitbucket_argument_spec() - argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), - name=dict(type='str', required=True), - value=dict(type='str'), - secured=dict(type='bool', default=False), - state=dict(type='str', choices=['present', 'absent'], required=True), - ) - module = BitBucketPipelineVariable( - argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=BitbucketHelper.bitbucket_required_one_of(), - required_together=BitbucketHelper.bitbucket_required_together(), - ) - - bitbucket = BitbucketHelper(module) - - value = 
module.params['value'] - state = module.params['state'] - secured = module.params['secured'] - - # Check parameters - if (value is None) and (state == 'present'): - module.fail_json(msg=error_messages['required_value']) - - # Retrieve access token for authorized API requests - bitbucket.fetch_access_token() - - # Retrieve existing pipeline variable (if any) - existing_variable = get_existing_pipeline_variable(module, bitbucket) - changed = False - - # Create new variable in case it doesn't exists - if not existing_variable and (state == 'present'): - if not module.check_mode: - create_pipeline_variable(module, bitbucket) - changed = True - - # Update variable if it is secured or the old value does not match the new one - elif existing_variable and (state == 'present'): - if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value): - if not module.check_mode: - update_pipeline_variable(module, bitbucket, existing_variable['uuid']) - changed = True - - # Delete variable - elif existing_variable and (state == 'absent'): - if not module.check_mode: - delete_pipeline_variable(module, bitbucket, existing_variable['uuid']) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/bzr.py b/ansible_collections/community/general/plugins/modules/source_control/bzr.py deleted file mode 100644 index a4ce4bc0..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/bzr.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, André Paramés -# Based on the Git module by Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: bzr -author: -- André Paramés (@andreparames) -short_description: Deploy software (or files) from bzr branches -description: - - Manage I(bzr) branches to deploy files or software. -options: - name: - description: - - SSH or HTTP protocol address of the parent branch. - aliases: [ parent ] - required: yes - type: str - dest: - description: - - Absolute path of where the branch should be cloned to. - required: yes - type: path - version: - description: - - What version of the branch to clone. This can be the - bzr revno or revid. - default: head - type: str - force: - description: - - If C(yes), any modified files in the working - tree will be discarded. Before 1.9 the default - value was C(yes). - type: bool - default: 'no' - executable: - description: - - Path to bzr executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. 
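The "normal mechanism for resolving binary paths" mentioned above is AnsibleModule.get_bin_path; outside Ansible the closest equivalent is shutil.which. A sketch of the same fallback, standard library only (the helper name is hypothetical):

import shutil

def resolve_bzr(executable=None):
    # Prefer an explicitly supplied path; otherwise search PATH,
    # which is what get_bin_path does inside the module.
    path = executable or shutil.which("bzr")
    if path is None:
        raise RuntimeError("bzr executable not found on PATH")
    return path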
- type: str -''' - -EXAMPLES = ''' -- name: Checkout - community.general.bzr: - name: bzr+ssh://foosball.example.org/path/to/branch - dest: /srv/checkout - version: 22 -''' - -import os -import re - -from ansible.module_utils.basic import AnsibleModule - - -class Bzr(object): - def __init__(self, module, parent, dest, version, bzr_path): - self.module = module - self.parent = parent - self.dest = dest - self.version = version - self.bzr_path = bzr_path - - def _command(self, args_list, cwd=None, **kwargs): - (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) - return (rc, out, err) - - def get_version(self): - '''samples the version of the bzr branch''' - - cmd = "%s revno" % self.bzr_path - rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) - revno = stdout.strip() - return revno - - def clone(self): - '''makes a new bzr branch if it does not already exist''' - dest_dirname = os.path.dirname(self.dest) - try: - os.makedirs(dest_dirname) - except Exception: - pass - if self.version.lower() != 'head': - args_list = ["branch", "-r", self.version, self.parent, self.dest] - else: - args_list = ["branch", self.parent, self.dest] - return self._command(args_list, check_rc=True, cwd=dest_dirname) - - def has_local_mods(self): - - cmd = "%s status -S" % self.bzr_path - rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) - lines = stdout.splitlines() - - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) - return len(lines) > 0 - - def reset(self, force): - ''' - Resets the index and working tree to head. - Discards any changes to tracked files in the working - tree since that commit. - ''' - if not force and self.has_local_mods(): - self.module.fail_json(msg="Local modifications exist in branch (force=no).") - return self._command(["revert"], check_rc=True, cwd=self.dest) - - def fetch(self): - '''updates branch from remote sources''' - if self.version.lower() != 'head': - (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) - else: - (rc, out, err) = self._command(["pull"], cwd=self.dest) - if rc != 0: - self.module.fail_json(msg="Failed to pull") - return (rc, out, err) - - def switch_version(self): - '''once pulled, switch to a particular revno or revid''' - if self.version.lower() != 'head': - args_list = ["revert", "-r", self.version] - else: - args_list = ["revert"] - return self._command(args_list, check_rc=True, cwd=self.dest) - - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec=dict( - dest=dict(type='path', required=True), - name=dict(type='str', required=True, aliases=['parent']), - version=dict(type='str', default='head'), - force=dict(type='bool', default=False), - executable=dict(type='str'), - ) - ) - - dest = module.params['dest'] - parent = module.params['name'] - version = module.params['version'] - force = module.params['force'] - bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) - - bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') - - rc, out, err = (0, None, None) - - bzr = Bzr(module, parent, dest, version, bzr_path) - - # if there is no bzr configuration, do a branch operation - # else pull and switch the version - before = None - local_mods = False - if not os.path.exists(bzrconfig): - (rc, out, err) = bzr.clone() - - else: - # else do a pull - local_mods = bzr.has_local_mods() - before = bzr.get_version() - (rc, out, err) = bzr.reset(force) - if rc != 0: - 
module.fail_json(msg=err) - (rc, out, err) = bzr.fetch() - if rc != 0: - module.fail_json(msg=err) - - # switch to version specified regardless of whether - # we cloned or pulled - (rc, out, err) = bzr.switch_version() - - # determine if we changed anything - after = bzr.get_version() - changed = False - - if before != after or local_mods: - changed = True - - module.exit_json(changed=changed, before=before, after=after) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/git_config.py b/ansible_collections/community/general/plugins/modules/source_control/git_config.py deleted file mode 100644 index ab713701..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/git_config.py +++ /dev/null @@ -1,289 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Marius Gedminas -# (c) 2016, Matthew Gamble -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: git_config -author: - - Matthew Gamble (@djmattyg007) - - Marius Gedminas (@mgedmin) -requirements: ['git'] -short_description: Read and write git configuration -description: - - The C(git_config) module changes git configuration by invoking 'git config'. - This is needed if you don't want to use M(ansible.builtin.template) for the entire git - config file (e.g. because you need to change just C(user.email) in - /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or - don't work correctly in check mode. -options: - list_all: - description: - - List all settings (optionally limited to a given I(scope)). - type: bool - default: 'no' - name: - description: - - The name of the setting. If no value is supplied, the value will - be read from the config if it has been set. - type: str - repo: - description: - - Path to a git repository for reading and writing values from a - specific repo. - type: path - file: - description: - - Path to an adhoc git configuration file to be managed using the C(file) scope. - type: path - version_added: 2.0.0 - scope: - description: - - Specify which scope to read/set values from. - - This is required when setting config values. - - If this is set to C(local), you must also specify the C(repo) parameter. - - If this is set to C(file), you must also specify the C(file) parameter. - - It defaults to system only when not using I(list_all)=C(yes). - choices: [ "file", "local", "global", "system" ] - type: str - state: - description: - - "Indicates the setting should be set/unset. - This parameter has higher precedence than I(value) parameter: - when I(state)=absent and I(value) is defined, I(value) is discarded." - choices: [ 'present', 'absent' ] - default: 'present' - type: str - value: - description: - - When specifying the name of a single setting, supply a value to - set that setting to the given value. 
- type: str -''' - -EXAMPLES = ''' -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: alias.ci - scope: global - value: commit - -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: alias.st - scope: global - value: status - -- name: Remove a setting from ~/.gitconfig - community.general.git_config: - name: alias.ci - scope: global - state: absent - -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: core.editor - scope: global - value: vim - -- name: Add a setting system-wide - community.general.git_config: - name: alias.remotev - scope: system - value: remote -v - -- name: Add a setting to a system scope (default) - community.general.git_config: - name: alias.diffc - value: diff --cached - -- name: Add a setting to a system scope (default) - community.general.git_config: - name: color.ui - value: auto - -- name: Make etckeeper not complaining when it is invoked by cron - community.general.git_config: - name: user.email - repo: /etc - scope: local - value: 'root@{{ ansible_fqdn }}' - -- name: Read individual values from git config - community.general.git_config: - name: alias.ci - scope: global - -- name: Scope system is also assumed when reading values, unless list_all=yes - community.general.git_config: - name: alias.diffc - -- name: Read all values from git config - community.general.git_config: - list_all: yes - scope: global - -- name: When list_all is yes and no scope is specified, you get configuration from all scopes - community.general.git_config: - list_all: yes - -- name: Specify a repository to include local settings - community.general.git_config: - list_all: yes - repo: /path/to/repo.git -''' - -RETURN = ''' ---- -config_value: - description: When list_all=no and value is not set, a string containing the value of the setting in name - returned: success - type: str - sample: "vim" - -config_values: - description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings - returned: success - type: dict - sample: - core.editor: "vim" - color.ui: "auto" - alias.diffc: "diff --cached" - alias.remotev: "remote -v" -''' -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - list_all=dict(required=False, type='bool', default=False), - name=dict(type='str'), - repo=dict(type='path'), - file=dict(type='path'), - scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']), - state=dict(required=False, type='str', default='present', choices=['present', 'absent']), - value=dict(required=False), - ), - mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']], - required_if=[ - ('scope', 'local', ['repo']), - ('scope', 'file', ['file']) - ], - required_one_of=[['list_all', 'name']], - supports_check_mode=True, - ) - git_path = module.get_bin_path('git', True) - - params = module.params - # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting. - # Set the locale to C to ensure consistent messages. 
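The locale pinning applied just below matters because the module matches on git's error strings. The same trick outside AnsibleModule, as a stand-alone sketch using only the standard library (run_command_environ_update applies it to every run_command call in the module):

import os
import subprocess

env = dict(os.environ, LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C")
proc = subprocess.run(["git", "config", "--includes", "--global", "alias.ci"],
                      capture_output=True, text=True, env=env)
if proc.returncode >= 2:   # rc 1 just means the key is unset, which is fine
    raise RuntimeError(proc.stderr)
print(proc.stdout.rstrip())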
- module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - if params['name']: - name = params['name'] - else: - name = None - - if params['scope']: - scope = params['scope'] - elif params['list_all']: - scope = None - else: - scope = 'system' - - if params['state'] == 'absent': - unset = 'unset' - params['value'] = None - else: - unset = None - - if params['value']: - new_value = params['value'] - else: - new_value = None - - args = [git_path, "config", "--includes"] - if params['list_all']: - args.append('-l') - if scope == 'file': - args.append('-f') - args.append(params['file']) - elif scope: - args.append("--" + scope) - if name: - args.append(name) - - if scope == 'local': - dir = params['repo'] - elif params['list_all'] and params['repo']: - # Include local settings from a specific repo when listing all available settings - dir = params['repo'] - else: - # Run from root directory to avoid accidentally picking up any local config settings - dir = "/" - - (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False) - if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: - # This just means nothing has been set at the given scope - module.exit_json(changed=False, msg='', config_values={}) - elif rc >= 2: - # If the return code is 1, it just means the option hasn't been set yet, which is fine. - module.fail_json(rc=rc, msg=err, cmd=' '.join(args)) - - if params['list_all']: - values = out.rstrip().splitlines() - config_values = {} - for value in values: - k, v = value.split('=', 1) - config_values[k] = v - module.exit_json(changed=False, msg='', config_values=config_values) - elif not new_value and not unset: - module.exit_json(changed=False, msg='', config_value=out.rstrip()) - elif unset and not out: - module.exit_json(changed=False, msg='no setting to unset') - else: - old_value = out.rstrip() - if old_value == new_value: - module.exit_json(changed=False, msg="") - - if not module.check_mode: - if unset: - args.insert(len(args) - 1, "--" + unset) - cmd = args - else: - cmd = args + [new_value] - try: # try using extra parameter from ansible-base 2.10.4 onwards - (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False) - except TypeError: - # @TODO remove try/except when community.general drop support for 2.10.x - if not os.path.isdir(dir): - module.fail_json(msg="Cannot find directory '{0}'".format(dir)) - (rc, out, err) = module.run_command(cmd, cwd=dir, expand_user_and_vars=False) - if err: - module.fail_json(rc=rc, msg=err, cmd=cmd) - - module.exit_json( - msg='setting changed', - diff=dict( - before_header=' '.join(args), - before=old_value + "\n", - after_header=' '.join(args), - after=(new_value or '') + "\n" - ), - changed=True - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py b/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py deleted file mode 100644 index 7a67a123..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py +++ /dev/null @@ -1,342 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: github_deploy_key 
-author: "Ali (@bincyber)" -short_description: Manages deploy keys for GitHub repositories. -description: - - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password, - username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin - rights on the repository are required." -options: - github_url: - description: - - The base URL of the GitHub API - required: false - type: str - version_added: '0.2.0' - default: https://api.github.com - owner: - description: - - The name of the individual account or organization that owns the GitHub repository. - required: true - aliases: [ 'account', 'organization' ] - type: str - repo: - description: - - The name of the GitHub repository. - required: true - aliases: [ 'repository' ] - type: str - name: - description: - - The name for the deploy key. - required: true - aliases: [ 'title', 'label' ] - type: str - key: - description: - - The SSH public key to add to the repository as a deploy key. - required: true - type: str - read_only: - description: - - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write. - type: bool - default: 'yes' - state: - description: - - The state of the deploy key. - default: "present" - choices: [ "present", "absent" ] - type: str - force: - description: - - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. - type: bool - default: 'no' - username: - description: - - The username to authenticate with. Should not be set when using personal access token - type: str - password: - description: - - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination. - type: str - token: - description: - - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password). - type: str - otp: - description: - - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password). - - Alias C(2fa_token) has been deprecated and will be removed in community.general 5.0.0. - aliases: ['2fa_token'] - type: int -notes: - - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." -''' - -EXAMPLES = ''' -- name: Add a new read-only deploy key to a GitHub repository using basic authentication - community.general.github_deploy_key: - owner: "johndoe" - repo: "example" - name: "new-deploy-key" - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - read_only: yes - username: "johndoe" - password: "supersecretpassword" - -- name: Remove an existing deploy key from a GitHub repository - community.general.github_deploy_key: - owner: "johndoe" - repository: "example" - name: "new-deploy-key" - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - force: yes - username: "johndoe" - password: "supersecretpassword" - state: absent - -- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate - community.general.github_deploy_key: - owner: "johndoe" - repository: "example" - name: "new-deploy-key" - key: "{{ lookup('file', '~/.ssh/github.pub') }}" - force: yes - token: "ABAQDAwXxn7kIMNWzcDfo..." 
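The authentication variants shown in these examples map onto plain HTTP headers; a condensed view of what the module's headers property (below) produces, with hypothetical values (Basic auth itself is delegated to fetch_url via url_username/url_password):

def auth_headers(username=None, password=None, token=None, otp=None):
    if username and password:
        # fetch_url sends the Basic-auth header; only 2FA needs an extra one.
        return {"X-GitHub-OTP": str(otp)} if otp else None
    if token:
        return {"Authorization": "token %s" % token}
    return None

print(auth_headers(token="ghp_hypothetical"))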
- -- name: Re-add a deploy key to a GitHub repository but with a different name - community.general.github_deploy_key: - owner: "johndoe" - repository: "example" - name: "replace-deploy-key" - key: "{{ lookup('file', '~/.ssh/github.pub') }}" - username: "johndoe" - password: "supersecretpassword" - -- name: Add a new deploy key to a GitHub repository using 2FA - community.general.github_deploy_key: - owner: "johndoe" - repo: "example" - name: "new-deploy-key-2" - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - username: "johndoe" - password: "supersecretpassword" - otp: 123456 - -- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise - community.general.github_deploy_key: - github_url: "https://api.example.com" - owner: "janedoe" - repo: "example" - name: "new-deploy-key" - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - read_only: yes - username: "janedoe" - password: "supersecretpassword" -''' - -RETURN = ''' -msg: - description: the status message describing what occurred - returned: always - type: str - sample: "Deploy key added successfully" - -http_status_code: - description: the HTTP status code returned by the GitHub API - returned: failed - type: int - sample: 400 - -error: - description: the error message returned by the GitHub API - returned: failed - type: str - sample: "key is already in use" - -id: - description: the key identifier assigned by GitHub for the deploy key - returned: changed - type: int - sample: 24381901 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from re import findall - - -class GithubDeployKey(object): - def __init__(self, module): - self.module = module - - self.github_url = self.module.params['github_url'] - self.name = module.params['name'] - self.key = module.params['key'] - self.state = module.params['state'] - self.read_only = module.params.get('read_only', True) - self.force = module.params.get('force', False) - self.username = module.params.get('username', None) - self.password = module.params.get('password', None) - self.token = module.params.get('token', None) - self.otp = module.params.get('otp', None) - - @property - def url(self): - owner = self.module.params['owner'] - repo = self.module.params['repo'] - return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo) - - @property - def headers(self): - if self.username is not None and self.password is not None: - self.module.params['url_username'] = self.username - self.module.params['url_password'] = self.password - self.module.params['force_basic_auth'] = True - if self.otp is not None: - return {"X-GitHub-OTP": self.otp} - elif self.token is not None: - return {"Authorization": "token {0}".format(self.token)} - else: - return None - - def paginate(self, url): - while url: - resp, info = fetch_url(self.module, url, headers=self.headers, method="GET") - - if info["status"] == 200: - yield self.module.from_json(resp.read()) - - links = {} - for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]): - links[y] = x - - url = links.get('next') - else: - self.handle_error(method="GET", info=info) - - def get_existing_key(self): - for keys in self.paginate(self.url): - if keys: - for i in keys: - existing_key_id = str(i["id"]) - if i["key"].split() == self.key.split()[:2]: - return existing_key_id - elif i['title'] == self.name and self.force: - return existing_key_id - else: - return None - - def add_new_key(self): - request_body = {"title": self.name, 
"key": self.key, "read_only": self.read_only} - - resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30) - - status_code = info["status"] - - if status_code == 201: - response_body = self.module.from_json(resp.read()) - key_id = response_body["id"] - self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id) - elif status_code == 422: - self.module.exit_json(changed=False, msg="Deploy key already exists") - else: - self.handle_error(method="POST", info=info) - - def remove_existing_key(self, key_id): - resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE") - - status_code = info["status"] - - if status_code == 204: - if self.state == 'absent': - self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id) - else: - self.handle_error(method="DELETE", info=info, key_id=key_id) - - def handle_error(self, method, info, key_id=None): - status_code = info['status'] - body = info.get('body') - if body: - err = self.module.from_json(body)['message'] - - if status_code == 401: - self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err) - elif status_code == 404: - self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err) - else: - if method == "GET": - self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err) - elif method == "POST": - self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err) - elif method == "DELETE": - self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - github_url=dict(required=False, type='str', default="https://api.github.com"), - owner=dict(required=True, type='str', aliases=['account', 'organization']), - repo=dict(required=True, type='str', aliases=['repository']), - name=dict(required=True, type='str', aliases=['title', 'label']), - key=dict(required=True, type='str', no_log=False), - read_only=dict(required=False, type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - force=dict(required=False, type='bool', default=False), - username=dict(required=False, type='str'), - password=dict(required=False, type='str', no_log=True), - otp=dict( - required=False, type='int', aliases=['2fa_token'], no_log=True, - deprecated_aliases=[dict(name='2fa_token', version='5.0.0', collection_name='community.general')]), - token=dict(required=False, type='str', no_log=True) - ), - mutually_exclusive=[ - ['password', 'token'] - ], - required_together=[ - ['username', 'password'], - ['otp', 'username', 'password'] - ], - required_one_of=[ - ['username', 'token'] - ], - supports_check_mode=True, - ) - - deploy_key = GithubDeployKey(module) - - if module.check_mode: - key_id = deploy_key.get_existing_key() - if deploy_key.state == "present" and key_id is None: - module.exit_json(changed=True) - elif deploy_key.state == "present" and key_id is not None: - module.exit_json(changed=False) - - # to forcefully modify an existing key, the existing key must be deleted first - if deploy_key.state == 'absent' or deploy_key.force: - key_id = deploy_key.get_existing_key() - - if key_id is not None: - deploy_key.remove_existing_key(key_id) 
- elif deploy_key.state == 'absent': - module.exit_json(changed=False, msg="Deploy key does not exist") - - if deploy_key.state == "present": - deploy_key.add_new_key() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py b/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py deleted file mode 100644 index 4add29f3..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017-18, Abhijeet Kasurde -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: github_issue -short_description: View GitHub issue. -description: - - View GitHub issue for a given repository and organization. -options: - repo: - description: - - Name of repository from which issue needs to be retrieved. - required: true - type: str - organization: - description: - - Name of the GitHub organization in which the repository is hosted. - required: true - type: str - issue: - description: - - Issue number for which information is required. - required: true - type: int - action: - description: - - Get various details about issue depending upon action specified. - default: 'get_status' - choices: - - 'get_status' - type: str -author: - - Abhijeet Kasurde (@Akasurde) -''' - -RETURN = ''' -issue_status: - description: State of the GitHub issue - type: str - returned: success - sample: open, closed -''' - -EXAMPLES = ''' -- name: Check if GitHub issue is closed or not - community.general.github_issue: - organization: ansible - repo: ansible - issue: 23642 - action: get_status - register: r - -- name: Take action depending upon issue status - ansible.builtin.debug: - msg: Do something when issue 23642 is open - when: r.issue_status == 'open' -''' - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def main(): - module = AnsibleModule( - argument_spec=dict( - organization=dict(required=True), - repo=dict(required=True), - issue=dict(type='int', required=True), - action=dict(choices=['get_status'], default='get_status'), - ), - supports_check_mode=True, - ) - - organization = module.params['organization'] - repo = module.params['repo'] - issue = module.params['issue'] - action = module.params['action'] - - result = dict() - - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/vnd.github.v3+json', - } - - url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue) - - response, info = fetch_url(module, url, headers=headers) - if not (200 <= info['status'] < 400): - if info['status'] == 404: - module.fail_json(msg="Failed to find issue %s" % issue) - module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg'])) - - gh_obj = json.loads(response.read()) - - if action == 'get_status' or action is None: - if module.check_mode: - result.update(changed=True) - else: - result.update(changed=True, issue_status=gh_obj['state']) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py 
b/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py deleted file mode 100644 index 2afbe29a..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: github_key -short_description: Manage GitHub access keys. -description: - - Creates, removes, or updates GitHub access keys. -options: - token: - description: - - GitHub Access Token with permission to list and create public keys. - required: true - type: str - name: - description: - - SSH key name - required: true - type: str - pubkey: - description: - - SSH public key value. Required when C(state=present). - type: str - state: - description: - - Whether to remove a key, ensure that it exists, or update its value. - choices: ['present', 'absent'] - default: 'present' - type: str - force: - description: - - The default is C(yes), which will replace the existing remote key - if it's different than C(pubkey). If C(no), the key will only be - set if no key with the given C(name) exists. - type: bool - default: 'yes' - -author: Robert Estelle (@erydo) -''' - -RETURN = ''' -deleted_keys: - description: An array of key objects that were deleted. Only present on state=absent - type: list - returned: When state=absent - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] -matching_keys: - description: An array of keys matching the specified name. Only present on state=present - type: list - returned: When state=present - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] -key: - description: Metadata about the key just created. 
Only present on state=present - type: dict - returned: success - sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False} -''' - -EXAMPLES = ''' -- name: Read SSH public key to authorize - ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub - register: ssh_pub_key - -- name: Authorize key with GitHub - local_action: - module: github_key - name: Access Key for Some Machine - token: '{{ github_access_token }}' - pubkey: '{{ ssh_pub_key.stdout }}' -''' - - -import json -import re - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -API_BASE = 'https://api.github.com' - - -class GitHubResponse(object): - def __init__(self, response, info): - self.content = response.read() - self.info = info - - def json(self): - return json.loads(self.content) - - def links(self): - links = {} - if 'link' in self.info: - link_header = self.info['link'] - matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header) - for url, rel in matches: - links[rel] = url - return links - - -class GitHubSession(object): - def __init__(self, module, token): - self.module = module - self.token = token - - def request(self, method, url, data=None): - headers = { - 'Authorization': 'token %s' % self.token, - 'Content-Type': 'application/json', - 'Accept': 'application/vnd.github.v3+json', - } - response, info = fetch_url( - self.module, url, method=method, data=data, headers=headers) - if not (200 <= info['status'] < 400): - self.module.fail_json( - msg=(" failed to send request %s to %s: %s" - % (method, url, info['msg']))) - return GitHubResponse(response, info) - - -def get_all_keys(session): - url = API_BASE + '/user/keys' - result = [] - while url: - r = session.request('GET', url) - result.extend(r.json()) - url = r.links().get('next') - return result - - -def create_key(session, name, pubkey, check_mode): - if check_mode: - from datetime import datetime - now = datetime.utcnow() - return { - 'id': 0, - 'key': pubkey, - 'title': name, - 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', - 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'), - 'read_only': False, - 'verified': False - } - else: - return session.request( - 'POST', - API_BASE + '/user/keys', - data=json.dumps({'title': name, 'key': pubkey})).json() - - -def delete_keys(session, to_delete, check_mode): - if check_mode: - return - - for key in to_delete: - session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"]) - - -def ensure_key_absent(session, name, check_mode): - to_delete = [key for key in get_all_keys(session) if key['title'] == name] - delete_keys(session, to_delete, check_mode=check_mode) - - return {'changed': bool(to_delete), - 'deleted_keys': to_delete} - - -def ensure_key_present(module, session, name, pubkey, force, check_mode): - all_keys = get_all_keys(session) - matching_keys = [k for k in all_keys if k['title'] == name] - deleted_keys = [] - - new_signature = pubkey.split(' ')[1] - for key in all_keys: - existing_signature = key['key'].split(' ')[1] - if new_signature == existing_signature and key['title'] != name: - module.fail_json(msg=( - "another key with the same content is already registered " - "under the name |{0}|").format(key['title'])) - - if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature: - delete_keys(session, matching_keys, check_mode=check_mode) - (deleted_keys, matching_keys) = (matching_keys, []) - - if not matching_keys: - key = 
create_key(session, name, pubkey, check_mode=check_mode) - else: - key = matching_keys[0] - - return { - 'changed': bool(deleted_keys or not matching_keys), - 'deleted_keys': deleted_keys, - 'matching_keys': matching_keys, - 'key': key - } - - -def main(): - argument_spec = { - 'token': {'required': True, 'no_log': True}, - 'name': {'required': True}, - 'pubkey': {}, - 'state': {'choices': ['present', 'absent'], 'default': 'present'}, - 'force': {'default': True, 'type': 'bool'}, - } - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - token = module.params['token'] - name = module.params['name'] - state = module.params['state'] - force = module.params['force'] - pubkey = module.params.get('pubkey') - - if pubkey: - pubkey_parts = pubkey.split(' ') - # Keys consist of a protocol, the key data, and an optional comment. - if len(pubkey_parts) < 2: - module.fail_json(msg='"pubkey" parameter has an invalid format') - elif state == 'present': - module.fail_json(msg='"pubkey" is required when state=present') - - session = GitHubSession(module, token) - if state == 'present': - result = ensure_key_present(module, session, name, pubkey, force=force, - check_mode=module.check_mode) - elif state == 'absent': - result = ensure_key_absent(session, name, check_mode=module.check_mode) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py b/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py deleted file mode 100644 index 654dce5f..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Team -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: github_release -short_description: Interact with GitHub Releases -description: - - Fetch metadata about GitHub Releases -options: - token: - description: - - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password). - type: str - user: - description: - - The GitHub account that owns the repository - type: str - required: true - password: - description: - - The GitHub account password for the user. Mutually exclusive with C(token). - type: str - repo: - description: - - Repository name - type: str - required: true - action: - description: - - Action to perform - type: str - required: true - choices: [ 'latest_release', 'create_release' ] - tag: - description: - - Tag name when creating a release. Required when using action is set to C(create_release). - type: str - target: - description: - - Target of release when creating a release - type: str - name: - description: - - Name of release when creating a release - type: str - body: - description: - - Description of the release when creating a release - type: str - draft: - description: - - Sets if the release is a draft or not. (boolean) - type: 'bool' - default: 'no' - prerelease: - description: - - Sets if the release is a prerelease or not. 
(boolean) - type: bool - default: 'no' - -author: - - "Adrian Moisey (@adrianmoisey)" -requirements: - - "github3.py >= 1.0.0a3" -''' - -EXAMPLES = ''' -- name: Get latest release of a public repository - community.general.github_release: - user: ansible - repo: ansible - action: latest_release - -- name: Get latest release of testuseer/testrepo - community.general.github_release: - token: tokenabc1234567890 - user: testuser - repo: testrepo - action: latest_release - -- name: Get latest release of test repo using username and password. Ansible 2.4. - community.general.github_release: - user: testuser - password: secret123 - repo: testrepo - action: latest_release - -- name: Create a new release - community.general.github_release: - token: tokenabc1234567890 - user: testuser - repo: testrepo - action: create_release - tag: test - target: master - name: My Release - body: Some description - -''' - -RETURN = ''' -create_release: - description: - - Version of the created release - - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged" - - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped" - type: str - returned: success - sample: 1.1.0 - -latest_release: - description: Version of the latest release - type: str - returned: success - sample: 1.1.0 -''' - -import traceback - -GITHUB_IMP_ERR = None -try: - import github3 - - HAS_GITHUB_API = True -except ImportError: - GITHUB_IMP_ERR = traceback.format_exc() - HAS_GITHUB_API = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def main(): - module = AnsibleModule( - argument_spec=dict( - repo=dict(required=True), - user=dict(required=True), - password=dict(no_log=True), - token=dict(no_log=True), - action=dict( - required=True, choices=['latest_release', 'create_release']), - tag=dict(type='str'), - target=dict(type='str'), - name=dict(type='str'), - body=dict(type='str'), - draft=dict(type='bool', default=False), - prerelease=dict(type='bool', default=False), - ), - supports_check_mode=True, - mutually_exclusive=(('password', 'token'),), - required_if=[('action', 'create_release', ['tag']), - ('action', 'create_release', ['password', 'token'], True)], - ) - - if not HAS_GITHUB_API: - module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'), - exception=GITHUB_IMP_ERR) - - repo = module.params['repo'] - user = module.params['user'] - password = module.params['password'] - login_token = module.params['token'] - action = module.params['action'] - tag = module.params.get('tag') - target = module.params.get('target') - name = module.params.get('name') - body = module.params.get('body') - draft = module.params.get('draft') - prerelease = module.params.get('prerelease') - - # login to github - try: - if password: - gh_obj = github3.login(user, password=password) - elif login_token: - gh_obj = github3.login(token=login_token) - else: - gh_obj = github3.GitHub() - - # test if we're actually logged in - if password or login_token: - gh_obj.me() - except github3.exceptions.AuthenticationFailed as e: - module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e), - details="Please check username and password or token " - "for repository %s" % repo) - - repository = gh_obj.repository(user, repo) - - if not repository: - module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo)) - - if action == 'latest_release': - release 
= repository.latest_release() - if release: - module.exit_json(tag=release.tag_name) - else: - module.exit_json(tag=None) - - if action == 'create_release': - release_exists = repository.release_from_tag(tag) - if release_exists: - module.exit_json(changed=False, msg="Release for tag %s already exists." % tag) - - release = repository.create_release( - tag, target, name, body, draft, prerelease) - if release: - module.exit_json(changed=True, tag=release.tag_name) - else: - module.exit_json(changed=False, tag=None) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/github/github_repo.py b/ansible_collections/community/general/plugins/modules/source_control/github/github_repo.py deleted file mode 100644 index 1446e4ab..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/github/github_repo.py +++ /dev/null @@ -1,273 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Álvaro Torres Cogollo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: github_repo -short_description: Manage your repositories on Github -version_added: 2.2.0 -description: -- Manages Github repositories using PyGithub library. -- Authentication can be done with I(access_token) or with I(username) and I(password). -options: - username: - description: - - Username used for authentication. - - This is only needed when not using I(access_token). - type: str - required: false - password: - description: - - Password used for authentication. - - This is only needed when not using I(access_token). - type: str - required: false - access_token: - description: - - Token parameter for authentication. - - This is only needed when not using I(username) and I(password). - type: str - required: false - name: - description: - - Repository name. - type: str - required: true - description: - description: - - Description for the repository. - - Defaults to empty if I(force_defaults=true), which is the default in this module. - - Defaults to empty if I(force_defaults=false) when creating a new repository. - - This is only used when I(state) is C(present). - type: str - required: false - private: - description: - - Whether the repository should be private or not. - - Defaults to C(false) if I(force_defaults=true), which is the default in this module. - - Defaults to C(false) if I(force_defaults=false) when creating a new repository. - - This is only used when I(state) is C(present). - type: bool - required: false - state: - description: - - Whether the repository should exist or not. - type: str - default: present - choices: [ absent, present ] - required: false - organization: - description: - - Organization for the repository. - - When I(state) is C(present), the repository will be created in the current user profile. - type: str - required: false - api_url: - description: - - URL to the GitHub API if not using github.com but you own instance. - type: str - default: 'https://api.github.com' - version_added: "3.5.0" - force_defaults: - description: - - Overwrite current I(description) and I(private) attributes with defaults if set to C(true), which currently is the default. - - The default for this option will be deprecated in a future version of this collection, and eventually change to C(false). 
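The login fallback chain used by github_release above, pulled out as a stand-alone sketch (assumes github3.py >= 1.0.0a3, per the module's requirements; credentials are hypothetical):

import github3

def gh_login(user=None, password=None, token=None):
    if password:
        gh = github3.login(user, password=password)
    elif token:
        gh = github3.login(token=token)
    else:
        gh = github3.GitHub()   # anonymous, rate-limited access
    if password or token:
        gh.me()                 # cheap request that validates the credentials
    return gh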
- type: bool - default: true - required: false - version_added: 4.1.0 -requirements: -- PyGithub>=1.54 -notes: -- For Python 3, PyGithub>=1.54 should be used. -- "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)." -- "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)." -- Supports C(check_mode). -author: -- Álvaro Torres Cogollo (@atorrescogollo) -''' - -EXAMPLES = ''' -- name: Create a Github repository - community.general.github_repo: - access_token: mytoken - organization: MyOrganization - name: myrepo - description: "Just for fun" - private: yes - state: present - force_defaults: no - register: result - -- name: Delete the repository - community.general.github_repo: - username: octocat - password: password - organization: MyOrganization - name: myrepo - state: absent - register: result -''' - -RETURN = ''' -repo: - description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository). - returned: success and I(state) is C(present) - type: dict -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import sys - -GITHUB_IMP_ERR = None -try: - from github import Github, GithubException, GithubObject - from github.GithubException import UnknownObjectException - HAS_GITHUB_PACKAGE = True -except Exception: - GITHUB_IMP_ERR = traceback.format_exc() - HAS_GITHUB_PACKAGE = False - - -def authenticate(username=None, password=None, access_token=None, api_url=None): - if not api_url: - return None - - if access_token: - return Github(base_url=api_url, login_or_token=access_token) - else: - return Github(base_url=api_url, login_or_token=username, password=password) - - -def create_repo(gh, name, organization=None, private=None, description=None, check_mode=False): - result = dict( - changed=False, - repo=dict()) - if organization: - target = gh.get_organization(organization) - else: - target = gh.get_user() - - repo = None - try: - repo = target.get_repo(name=name) - result['repo'] = repo.raw_data - except UnknownObjectException: - if not check_mode: - repo = target.create_repo( - name=name, - private=GithubObject.NotSet if private is None else private, - description=GithubObject.NotSet if description is None else description, - ) - result['repo'] = repo.raw_data - - result['changed'] = True - - changes = {} - if private is not None: - if repo is None or repo.raw_data['private'] != private: - changes['private'] = private - if description is not None: - if repo is None or repo.raw_data['description'] not in (description, description or None): - changes['description'] = description - - if changes: - if not check_mode: - repo.edit(**changes) - - result['repo'].update({ - 'private': repo._private.value if not check_mode else private, - 'description': repo._description.value if not check_mode else description, - }) - result['changed'] = True - - return result - - -def delete_repo(gh, name, organization=None, check_mode=False): - result = dict(changed=False) - if organization: - target = gh.get_organization(organization) - else: - target = gh.get_user() - try: - repo = target.get_repo(name=name) - if not check_mode: - repo.delete() - result['changed'] = True - except UnknownObjectException: - pass - - return result - - -def run_module(params, check_mode=False): - if params['force_defaults']: - 
params['description'] = params['description'] or '' - params['private'] = params['private'] or False - - gh = authenticate( - username=params['username'], password=params['password'], access_token=params['access_token'], - api_url=params['api_url']) - if params['state'] == "absent": - return delete_repo( - gh=gh, - name=params['name'], - organization=params['organization'], - check_mode=check_mode - ) - else: - return create_repo( - gh=gh, - name=params['name'], - organization=params['organization'], - private=params['private'], - description=params['description'], - check_mode=check_mode - ) - - -def main(): - module_args = dict( - username=dict(type='str'), - password=dict(type='str', no_log=True), - access_token=dict(type='str', no_log=True), - name=dict(type='str', required=True), - state=dict(type='str', required=False, default="present", - choices=["present", "absent"]), - organization=dict(type='str', required=False, default=None), - private=dict(type='bool'), - description=dict(type='str'), - api_url=dict(type='str', required=False, default='https://api.github.com'), - force_defaults=dict(type='bool', default=True), - ) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - required_together=[('username', 'password')], - required_one_of=[('username', 'access_token')], - mutually_exclusive=[('username', 'access_token')] - ) - - if not HAS_GITHUB_PACKAGE: - module.fail_json(msg=missing_required_lib( - "PyGithub"), exception=GITHUB_IMP_ERR) - - try: - result = run_module(module.params, module.check_mode) - module.exit_json(**result) - except GithubException as e: - module.fail_json(msg="Github error. {0}".format(repr(e))) - except Exception as e: - module.fail_json(msg="Unexpected error. {0}".format(repr(e))) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py b/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py deleted file mode 100644 index fcb6f8d0..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: github_webhook -short_description: Manage GitHub webhooks -description: - - "Create and delete GitHub webhooks" -requirements: - - "PyGithub >= 1.3.5" -options: - repository: - description: - - Full name of the repository to configure a hook for - type: str - required: true - aliases: - - repo - url: - description: - - URL to which payloads will be delivered - type: str - required: true - content_type: - description: - - The media type used to serialize the payloads - type: str - required: false - choices: [ form, json ] - default: form - secret: - description: - - The shared secret between GitHub and the payload URL. - type: str - required: false - insecure_ssl: - description: - - > - Flag to indicate that GitHub should skip SSL verification when calling - the hook. - required: false - type: bool - default: false - events: - description: - - > - A list of GitHub events the hook is triggered for. Events are listed at - U(https://developer.github.com/v3/activity/events/types/). 
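The get-then-create flow of github_repo above, restated in plain PyGithub terms (PyGithub >= 1.54 per the module's notes; token and names are hypothetical):

from github import Github
from github.GithubException import UnknownObjectException

gh = Github(base_url="https://api.github.com", login_or_token="ghp_hypothetical")
target = gh.get_user()
try:
    repo = target.get_repo(name="myrepo")   # already exists: nothing to create
except UnknownObjectException:
    repo = target.create_repo(name="myrepo", private=False,
                              description="Just for fun")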
Required - unless C(state) is C(absent) - required: false - type: list - elements: str - active: - description: - - Whether or not the hook is active - required: false - type: bool - default: true - state: - description: - - Whether the hook should be present or absent - type: str - required: false - choices: [ absent, present ] - default: present - user: - description: - - User to authenticate to GitHub as - type: str - required: true - password: - description: - - Password to authenticate to GitHub with - type: str - required: false - token: - description: - - Token to authenticate to GitHub with - type: str - required: false - github_url: - description: - - Base URL of the GitHub API - type: str - required: false - default: https://api.github.com - -author: - - "Chris St. Pierre (@stpierre)" -''' - -EXAMPLES = ''' -- name: Create a new webhook that triggers on push (password auth) - community.general.github_webhook: - repository: ansible/ansible - url: https://www.example.com/hooks/ - events: - - push - user: "{{ github_user }}" - password: "{{ github_password }}" - -- name: Create a new webhook in a github enterprise installation with multiple event triggers (token auth) - community.general.github_webhook: - repository: myorg/myrepo - url: https://jenkins.example.com/ghprbhook/ - content_type: json - secret: "{{ github_shared_secret }}" - insecure_ssl: True - events: - - issue_comment - - pull_request - user: "{{ github_user }}" - token: "{{ github_user_api_token }}" - github_url: https://github.example.com - -- name: Delete a webhook (password auth) - community.general.github_webhook: - repository: ansible/ansible - url: https://www.example.com/hooks/ - state: absent - user: "{{ github_user }}" - password: "{{ github_password }}" -''' - -RETURN = ''' ---- -hook_id: - description: The GitHub ID of the hook created/updated - returned: when state is 'present' - type: int - sample: 6206 -''' - -import traceback - -GITHUB_IMP_ERR = None -try: - import github - HAS_GITHUB = True -except ImportError: - GITHUB_IMP_ERR = traceback.format_exc() - HAS_GITHUB = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def _create_hook_config(module): - return { - "url": module.params["url"], - "content_type": module.params["content_type"], - "secret": module.params.get("secret"), - "insecure_ssl": "1" if module.params["insecure_ssl"] else "0" - } - - -def create_hook(repo, module): - config = _create_hook_config(module) - try: - hook = repo.create_hook( - name="web", - config=config, - events=module.params["events"], - active=module.params["active"]) - except github.GithubException as err: - module.fail_json(msg="Unable to create hook for repository %s: %s" % ( - repo.full_name, to_native(err))) - - data = {"hook_id": hook.id} - return True, data - - -def update_hook(repo, hook, module): - config = _create_hook_config(module) - try: - hook.update() - hook.edit( - name="web", - config=config, - events=module.params["events"], - active=module.params["active"]) - - changed = hook.update() - except github.GithubException as err: - module.fail_json(msg="Unable to modify hook for repository %s: %s" % ( - repo.full_name, to_native(err))) - - data = {"hook_id": hook.id} - return changed, data - - -def main(): - module = AnsibleModule( - argument_spec=dict( - repository=dict(type='str', required=True, aliases=['repo']), - url=dict(type='str', required=True), - content_type=dict( - type='str', - 
choices=('json', 'form'), - required=False, - default='form'), - secret=dict(type='str', required=False, no_log=True), - insecure_ssl=dict(type='bool', required=False, default=False), - events=dict(type='list', elements='str', required=False), - active=dict(type='bool', required=False, default=True), - state=dict( - type='str', - required=False, - choices=('absent', 'present'), - default='present'), - user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), - github_url=dict( - type='str', required=False, default="https://api.github.com")), - mutually_exclusive=(('password', 'token'),), - required_one_of=(("password", "token"),), - required_if=(("state", "present", ("events",)),), - ) - - if not HAS_GITHUB: - module.fail_json(msg=missing_required_lib('PyGithub'), - exception=GITHUB_IMP_ERR) - - try: - github_conn = github.Github( - module.params["user"], - module.params.get("password") or module.params.get("token"), - base_url=module.params["github_url"]) - except github.GithubException as err: - module.fail_json(msg="Could not connect to GitHub at %s: %s" % ( - module.params["github_url"], to_native(err))) - - try: - repo = github_conn.get_repo(module.params["repository"]) - except github.BadCredentialsException as err: - module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( - module.params["github_url"], to_native(err))) - except github.UnknownObjectException as err: - module.fail_json( - msg="Could not find repository %s in GitHub at %s: %s" % ( - module.params["repository"], module.params["github_url"], - to_native(err))) - except Exception as err: - module.fail_json( - msg="Could not fetch repository %s from GitHub at %s: %s" % - (module.params["repository"], module.params["github_url"], - to_native(err)), - exception=traceback.format_exc()) - - hook = None - try: - for hook in repo.get_hooks(): - if hook.config.get("url") == module.params["url"]: - break - else: - hook = None - except github.GithubException as err: - module.fail_json(msg="Unable to get hooks from repository %s: %s" % ( - module.params["repository"], to_native(err))) - - changed = False - data = {} - if hook is None and module.params["state"] == "present": - changed, data = create_hook(repo, module) - elif hook is not None and module.params["state"] == "absent": - try: - hook.delete() - except github.GithubException as err: - module.fail_json( - msg="Unable to delete hook from repository %s: %s" % ( - repo.full_name, to_native(err))) - else: - changed = True - elif hook is not None and module.params["state"] == "present": - changed, data = update_hook(repo, hook, module) - # else, there is no hook and we want there to be no hook - - module.exit_json(changed=changed, **data) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py b/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py deleted file mode 100644 index 98a7516e..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: 
github_webhook_info -short_description: Query information about GitHub webhooks -description: - - "Query information about GitHub webhooks" - - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change. -requirements: - - "PyGithub >= 1.3.5" -options: - repository: - description: - - Full name of the repository to configure a hook for - type: str - required: true - aliases: - - repo - user: - description: - - User to authenticate to GitHub as - type: str - required: true - password: - description: - - Password to authenticate to GitHub with - type: str - required: false - token: - description: - - Token to authenticate to GitHub with - type: str - required: false - github_url: - description: - - Base URL of the github api - type: str - required: false - default: https://api.github.com - -author: - - "Chris St. Pierre (@stpierre)" -''' - -EXAMPLES = ''' -- name: List hooks for a repository (password auth) - community.general.github_webhook_info: - repository: ansible/ansible - user: "{{ github_user }}" - password: "{{ github_password }}" - register: ansible_webhooks - -- name: List hooks for a repository on GitHub Enterprise (token auth) - community.general.github_webhook_info: - repository: myorg/myrepo - user: "{{ github_user }}" - token: "{{ github_user_api_token }}" - github_url: https://github.example.com/api/v3/ - register: myrepo_webhooks -''' - -RETURN = ''' ---- -hooks: - description: A list of hooks that exist for the repo - returned: always - type: list - sample: > - [{"has_shared_secret": true, - "url": "https://jenkins.example.com/ghprbhook/", - "events": ["issue_comment", "pull_request"], - "insecure_ssl": "1", - "content_type": "json", - "active": true, - "id": 6206, - "last_response": {"status": "active", "message": "OK", "code": 200}}] -''' - -import traceback - -GITHUB_IMP_ERR = None -try: - import github - HAS_GITHUB = True -except ImportError: - GITHUB_IMP_ERR = traceback.format_exc() - HAS_GITHUB = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def _munge_hook(hook_obj): - retval = { - "active": hook_obj.active, - "events": hook_obj.events, - "id": hook_obj.id, - "url": hook_obj.url, - } - retval.update(hook_obj.config) - retval["has_shared_secret"] = "secret" in retval - if "secret" in retval: - del retval["secret"] - - retval["last_response"] = hook_obj.last_response.raw_data - return retval - - -def main(): - module = AnsibleModule( - argument_spec=dict( - repository=dict(type='str', required=True, aliases=["repo"]), - user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), - github_url=dict( - type='str', required=False, default="https://api.github.com")), - mutually_exclusive=(('password', 'token'), ), - required_one_of=(("password", "token"), ), - supports_check_mode=True) - - if not HAS_GITHUB: - module.fail_json(msg=missing_required_lib('PyGithub'), - exception=GITHUB_IMP_ERR) - - try: - github_conn = github.Github( - module.params["user"], - module.params.get("password") or module.params.get("token"), - base_url=module.params["github_url"]) - except github.GithubException as err: - module.fail_json(msg="Could not connect to GitHub at %s: %s" % ( - module.params["github_url"], to_native(err))) - - try: - repo = github_conn.get_repo(module.params["repository"]) - except github.BadCredentialsException as err: - 
module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( - module.params["github_url"], to_native(err))) - except github.UnknownObjectException as err: - module.fail_json( - msg="Could not find repository %s in GitHub at %s: %s" % ( - module.params["repository"], module.params["github_url"], - to_native(err))) - except Exception as err: - module.fail_json( - msg="Could not fetch repository %s from GitHub at %s: %s" % - (module.params["repository"], module.params["github_url"], - to_native(err)), - exception=traceback.format_exc()) - - try: - hooks = [_munge_hook(h) for h in repo.get_hooks()] - except github.GithubException as err: - module.fail_json( - msg="Unable to get hooks from repository %s: %s" % - (module.params["repository"], to_native(err)), - exception=traceback.format_exc()) - - module.exit_json(changed=False, hooks=hooks) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_branch.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_branch.py deleted file mode 100644 index ce71a00a..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_branch.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: gitlab_branch -short_description: Create or delete a branch -version_added: 4.2.0 -description: - - This module allows to create or delete branches. -author: - - paytroff (@paytroff) -requirements: - - python >= 2.7 - - python-gitlab >= 2.3.0 -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - state: - description: - - Create or delete branch. - default: present - type: str - choices: ["present", "absent"] - project: - description: - - The path or name of the project. - required: true - type: str - branch: - description: - - The name of the branch that needs to be created. - required: true - type: str - ref_branch: - description: - - Reference branch to create from. - - This must be specified if I(state=present). 
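The branch/ref behaviour described here maps onto a couple of python-gitlab calls. A minimal sketch of what C(present) and C(absent) do under the hood; the URL, token, project and branch names are placeholders:

import gitlab

# Placeholder connection details; any authenticated python-gitlab client works.
gl = gitlab.Gitlab("https://gitlab.example.com", private_token="REDACTED")
project = gl.projects.get("group1/project1")

# state=present with a missing branch: create it from the reference branch.
branch = project.branches.create({"branch": "branch2", "ref": "main"})

# state=absent: unprotect first, since protected branches cannot be deleted.
branch.unprotect()
branch.delete()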
-    type: str
-'''
-
-
-EXAMPLES = '''
-- name: Create branch branch2 from main
-  community.general.gitlab_branch:
-    api_url: https://gitlab.com
-    api_token: secret_access_token
-    project: "group1/project1"
-    branch: branch2
-    ref_branch: main
-    state: present
-
-- name: Delete branch branch2
-  community.general.gitlab_branch:
-    api_url: https://gitlab.com
-    api_token: secret_access_token
-    project: "group1/project1"
-    branch: branch2
-    state: absent
-
-'''
-
-RETURN = '''
-'''
-
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.api import basic_auth_argument_spec
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-GITLAB_IMP_ERR = None
-try:
-    import gitlab
-    HAS_GITLAB_PACKAGE = True
-except Exception:
-    GITLAB_IMP_ERR = traceback.format_exc()
-    HAS_GITLAB_PACKAGE = False
-
-from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication
-
-
-class GitlabBranch(object):
-
-    def __init__(self, module, project, gitlab_instance):
-        self.repo = gitlab_instance
-        self._module = module
-        self.project = self.get_project(project)
-
-    def get_project(self, project):
-        try:
-            return self.repo.projects.get(project)
-        except Exception:
-            return False
-
-    def get_branch(self, branch):
-        try:
-            return self.project.branches.get(branch)
-        except Exception:
-            return False
-
-    def create_branch(self, branch, ref_branch):
-        return self.project.branches.create({'branch': branch, 'ref': ref_branch})
-
-    def delete_branch(self, branch):
-        branch.unprotect()
-        return branch.delete()
-
-
-def main():
-    argument_spec = basic_auth_argument_spec()
-    argument_spec.update(auth_argument_spec())
-    argument_spec.update(
-        project=dict(type='str', required=True),
-        branch=dict(type='str', required=True),
-        ref_branch=dict(type='str', required=False),
-        state=dict(type='str', default="present", choices=["absent", "present"]),
-    )
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        mutually_exclusive=[
-            ['api_username', 'api_token'],
-            ['api_username', 'api_oauth_token'],
-            ['api_username', 'api_job_token'],
-            ['api_token', 'api_oauth_token'],
-            ['api_token', 'api_job_token'],
-        ],
-        required_together=[
-            ['api_username', 'api_password'],
-        ],
-        required_one_of=[
-            ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
-        ],
-        required_if=[
-            ['state', 'present', ['ref_branch'], True],
-        ],
-        supports_check_mode=False
-    )
-
-    project = module.params['project']
-    branch = module.params['branch']
-    ref_branch = module.params['ref_branch']
-    state = module.params['state']
-
-    if not HAS_GITLAB_PACKAGE:
-        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
-    gitlab_version = gitlab.__version__
-    if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
-        module.fail_json(msg="community.general.gitlab_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
-                             " Please upgrade python-gitlab to version 2.3.0 or above."
-                         % gitlab_version)
-
-    gitlab_instance = gitlab_authentication(module)
-    this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance)
-
-    this_branch = this_gitlab.get_branch(branch)
-
-    if not this_branch and state == "present":
-        r_branch = this_gitlab.get_branch(ref_branch)
-        if not r_branch:
-            module.fail_json(msg="Ref branch {b} does not exist.".format(b=ref_branch))
-        this_gitlab.create_branch(branch, ref_branch)
-        module.exit_json(changed=True, msg="Created the branch {b}.".format(b=branch))
-    elif this_branch and state == "present":
-        module.exit_json(changed=False, msg="Branch {b} already exists.".format(b=branch))
-    elif this_branch and state == "absent":
-        try:
-            this_gitlab.delete_branch(this_branch)
-            module.exit_json(changed=True, msg="Branch {b} deleted.".format(b=branch))
-        except Exception as e:
-            module.fail_json(msg="Failed to delete branch.", exception=traceback.format_exc())
-    else:
-        module.exit_json(changed=False, msg="No changes are needed.")
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py
deleted file mode 100644
index 5746186c..00000000
--- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
-# Copyright: (c) 2018, Marcus Watkins
-# Based on code:
-# Copyright: (c) 2013, Phillip Gentry
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: gitlab_deploy_key
-short_description: Manages GitLab project deploy keys.
-description:
-  - Adds, updates and removes project deploy keys.
-author:
-  - Marcus Watkins (@marwatk)
-  - Guillaume Martinez (@Lunik)
-requirements:
-  - python >= 2.7
-  - python-gitlab python module
-extends_documentation_fragment:
-  - community.general.auth_basic
-  - community.general.gitlab
-
-options:
-  project:
-    description:
-      - ID or full path of the project in the form of group/name.
-    required: true
-    type: str
-  title:
-    description:
-      - Deploy key's title.
-    required: true
-    type: str
-  key:
-    description:
-      - Deploy key.
-    required: true
-    type: str
-  can_push:
-    description:
-      - Whether this key can push to the project.
-    type: bool
-    default: no
-  state:
-    description:
-      - When C(present), the deploy key will be added to the project if it does not exist.
-      - When C(absent), it will be removed from the project if it exists.
-    default: present
-    type: str
-    choices: [ "present", "absent" ]
-'''
-
-EXAMPLES = '''
-- name: "Adding a project deploy key"
-  community.general.gitlab_deploy_key:
-    api_url: https://gitlab.example.com/
-    api_token: "{{ api_token }}"
-    project: "my_group/my_project"
-    title: "Jenkins CI"
-    state: present
-    key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
- -- name: "Update the above deploy key to add push access" - community.general.gitlab_deploy_key: - api_url: https://gitlab.example.com/ - api_token: "{{ api_token }}" - project: "my_group/my_project" - title: "Jenkins CI" - state: present - can_push: yes - -- name: "Remove the previous deploy key from the project" - community.general.gitlab_deploy_key: - api_url: https://gitlab.example.com/ - api_token: "{{ api_token }}" - project: "my_group/my_project" - state: absent - key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." - -''' - -RETURN = ''' -msg: - description: Success or failure message - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server - returned: always - type: dict - -error: - description: the error message returned by the GitLab API - returned: failed - type: str - sample: "400: key is already in use" - -deploy_key: - description: API object - returned: always - type: dict -''' - -import re -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_project, gitlab_authentication - - -class GitLabDeployKey(object): - def __init__(self, module, gitlab_instance): - self._module = module - self._gitlab = gitlab_instance - self.deploy_key_object = None - - ''' - @param project Project object - @param key_title Title of the key - @param key_key String of the key - @param key_can_push Option of the deploy_key - @param options Deploy key options - ''' - def create_or_update_deploy_key(self, project, key_title, key_key, options): - changed = False - - # note: unfortunately public key cannot be updated directly by - # GitLab REST API, so for that case we need to delete and - # than recreate the key - if self.deploy_key_object and self.deploy_key_object.key != key_key: - if not self._module.check_mode: - self.deploy_key_object.delete() - self.deploy_key_object = None - - # Because we have already call exists_deploy_key in main() - if self.deploy_key_object is None: - deploy_key = self.create_deploy_key(project, { - 'title': key_title, - 'key': key_key, - 'can_push': options['can_push']}) - changed = True - else: - changed, deploy_key = self.update_deploy_key(self.deploy_key_object, { - 'can_push': options['can_push']}) - - self.deploy_key_object = deploy_key - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title) - - try: - deploy_key.save() - except Exception as e: - self._module.fail_json(msg="Failed to update deploy key: %s " % e) - return True - else: - return False - - ''' - @param project Project Object - @param arguments Attributes of the deploy_key - ''' - def create_deploy_key(self, project, arguments): - if self._module.check_mode: - return True - - try: - deploy_key = project.keys.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e)) - - return deploy_key - - ''' - @param deploy_key Deploy Key Object - @param 
arguments Attributes of the deploy_key - ''' - def update_deploy_key(self, deploy_key, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(deploy_key, arg_key) != arguments[arg_key]: - setattr(deploy_key, arg_key, arguments[arg_key]) - changed = True - - return (changed, deploy_key) - - ''' - @param project Project object - @param key_title Title of the key - ''' - def find_deploy_key(self, project, key_title): - deploy_keys = project.keys.list(all=True) - for deploy_key in deploy_keys: - if (deploy_key.title == key_title): - return deploy_key - - ''' - @param project Project object - @param key_title Title of the key - ''' - def exists_deploy_key(self, project, key_title): - # When project exists, object will be stored in self.project_object. - deploy_key = self.find_deploy_key(project, key_title) - if deploy_key: - self.deploy_key_object = deploy_key - return True - return False - - def delete_deploy_key(self): - if self._module.check_mode: - return True - - return self.deploy_key_object.delete() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default="present", choices=["absent", "present"]), - project=dict(type='str', required=True), - key=dict(type='str', required=True, no_log=False), - can_push=dict(type='bool', default=False), - title=dict(type='str', required=True) - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ], - required_together=[ - ['api_username', 'api_password'] - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] - ], - supports_check_mode=True, - ) - - state = module.params['state'] - project_identifier = module.params['project'] - key_title = module.params['title'] - key_keyfile = module.params['key'] - key_can_push = module.params['can_push'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - - gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance) - - project = find_project(gitlab_instance, project_identifier) - - if project is None: - module.fail_json(msg="Failed to create deploy key: project %s doesn't exists" % project_identifier) - - deploy_key_exists = gitlab_deploy_key.exists_deploy_key(project, key_title) - - if state == 'absent': - if deploy_key_exists: - gitlab_deploy_key.delete_deploy_key() - module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title) - else: - module.exit_json(changed=False, msg="Deploy key deleted or does not exists") - - if state == 'present': - if gitlab_deploy_key.create_or_update_deploy_key(project, key_title, key_keyfile, {'can_push': key_can_push}): - - module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title, - deploy_key=gitlab_deploy_key.deploy_key_object._attrs) - else: - module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title, - deploy_key=gitlab_deploy_key.deploy_key_object._attrs) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py 
b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py deleted file mode 100644 index 8575c06f..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py +++ /dev/null @@ -1,399 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: gitlab_group -short_description: Creates/updates/deletes GitLab Groups -description: - - When the group does not exist in GitLab, it will be created. - - When the group does exist and state=absent, the group will be deleted. -author: - - Werner Dijkerman (@dj-wasabi) - - Guillaume Martinez (@Lunik) -requirements: - - python >= 2.7 - - python-gitlab python module -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - name: - description: - - Name of the group you want to create. - required: true - type: str - path: - description: - - The path of the group you want to create, this will be api_url/group_path - - If not supplied, the group_name will be used. - type: str - description: - description: - - A description for the group. - type: str - state: - description: - - create or delete group. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - parent: - description: - - Allow to create subgroups - - Id or Full path of parent group in the form of group/name - type: str - visibility: - description: - - Default visibility of the group - choices: ["private", "internal", "public"] - default: private - type: str - project_creation_level: - description: - - Determine if developers can create projects in the group. - choices: ["developer", "maintainer", "noone"] - type: str - version_added: 3.7.0 - auto_devops_enabled: - description: - - Default to Auto DevOps pipeline for all projects within this group. - type: bool - version_added: 3.7.0 - subgroup_creation_level: - description: - - Allowed to create subgroups. - choices: ["maintainer", "owner"] - type: str - version_added: 3.7.0 - require_two_factor_authentication: - description: - - Require all users in this group to setup two-factor authentication. - type: bool - version_added: 3.7.0 - avatar_path: - description: - - Absolute path image to configure avatar. File size should not exceed 200 kb. - - This option is only used on creation, not for updates. 
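For orientation, the subgroup nesting described in the I(parent) option boils down to resolving the parent group to its numeric id and passing that to a single create call. A rough python-gitlab sketch; the URL, token and group names are placeholders:

import gitlab

# Placeholder connection details.
gl = gitlab.Gitlab("https://gitlab.example.com", private_token="REDACTED")

# The parent is addressed by full path but must be passed on as a numeric id.
parent = gl.groups.get("super_parent/parent")
group = gl.groups.create({
    "name": "my_first_group",
    "path": "my_first_group",
    "parent_id": parent.id,
    "visibility": "private",
})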
- type: path - version_added: 4.2.0 -''' - -EXAMPLES = ''' -- name: "Delete GitLab Group" - community.general.gitlab_group: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - name: my_first_group - state: absent - -- name: "Create GitLab Group" - community.general.gitlab_group: - api_url: https://gitlab.example.com/ - validate_certs: True - api_username: dj-wasabi - api_password: "MySecretPassword" - name: my_first_group - path: my_first_group - state: present - -# The group will by created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group -- name: "Create GitLab SubGroup" - community.general.gitlab_group: - api_url: https://gitlab.example.com/ - validate_certs: True - api_username: dj-wasabi - api_password: "MySecretPassword" - name: my_first_group - path: my_first_group - state: present - parent: "super_parent/parent" - -# Other group which only allows sub-groups - no projects -- name: "Create GitLab Group for SubGroups only" - community.general.gitlab_group: - api_url: https://gitlab.example.com/ - validate_certs: True - api_username: dj-wasabi - api_password: "MySecretPassword" - name: my_main_group - path: my_main_group - state: present - project_creation_level: noone - auto_devops_enabled: false - subgroup_creation_level: maintainer -''' - -RETURN = ''' -msg: - description: Success or failure message - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server - returned: always - type: dict - -error: - description: the error message returned by the GitLab API - returned: failed - type: str - sample: "400: path is already in use" - -group: - description: API object - returned: always - type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_group, gitlab_authentication - - -class GitLabGroup(object): - def __init__(self, module, gitlab_instance): - self._module = module - self._gitlab = gitlab_instance - self.group_object = None - - ''' - @param group Group object - ''' - def get_group_id(self, group): - if group is not None: - return group.id - return None - - ''' - @param name Name of the group - @param parent Parent group full path - @param options Group options - ''' - def create_or_update_group(self, name, parent, options): - changed = False - - # Because we have already call userExists in main() - if self.group_object is None: - parent_id = self.get_group_id(parent) - - payload = { - 'name': name, - 'path': options['path'], - 'parent_id': parent_id, - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], - 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], - } - if options.get('description'): - payload['description'] = options['description'] - if options.get('require_two_factor_authentication'): - payload['require_two_factor_authentication'] = options['require_two_factor_authentication'] - group = self.create_group(payload) - - # add avatar to group - if options['avatar_path']: - try: - 
group.avatar = open(options['avatar_path'], 'rb') - except IOError as e: - self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e)) - changed = True - else: - changed, group = self.update_group(self.group_object, { - 'name': name, - 'description': options['description'], - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], - 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], - 'require_two_factor_authentication': options['require_two_factor_authentication'], - }) - - self.group_object = group - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name) - - try: - group.save() - except Exception as e: - self._module.fail_json(msg="Failed to update group: %s " % e) - return True - else: - return False - - ''' - @param arguments Attributes of the group - ''' - def create_group(self, arguments): - if self._module.check_mode: - return True - - try: - group = self._gitlab.groups.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json(msg="Failed to create group: %s " % to_native(e)) - - return group - - ''' - @param group Group Object - @param arguments Attributes of the group - ''' - def update_group(self, group, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(group, arg_key) != arguments[arg_key]: - setattr(group, arg_key, arguments[arg_key]) - changed = True - - return (changed, group) - - def delete_group(self): - group = self.group_object - - if len(group.projects.list()) >= 1: - self._module.fail_json( - msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.") - else: - if self._module.check_mode: - return True - - try: - group.delete() - except Exception as e: - self._module.fail_json(msg="Failed to delete group: %s " % to_native(e)) - - ''' - @param name Name of the groupe - @param full_path Complete path of the Group including parent group path. / - ''' - def exists_group(self, project_identifier): - # When group/user exists, object will be stored in self.group_object. 
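The update path in update_group() above is a small generic pattern worth noting: compare each requested attribute against the live object and only mark a change when a value is set and actually differs. A standalone sketch of that pattern, with no GitLab dependency:

def diff_update(obj, arguments):
    # Write only attributes that are requested (not None) and differ,
    # and report whether anything was actually touched.
    changed = False
    for key, value in arguments.items():
        if value is not None and getattr(obj, key) != value:
            setattr(obj, key, value)
            changed = True
    return changed, obj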
- group = find_group(self._gitlab, project_identifier) - if group: - self.group_object = group - return True - return False - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - parent=dict(type='str'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), - project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), - auto_devops_enabled=dict(type='bool'), - subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), - require_two_factor_authentication=dict(type='bool'), - avatar_path=dict(type='path'), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] - ], - supports_check_mode=True, - ) - - group_name = module.params['name'] - group_path = module.params['path'] - description = module.params['description'] - state = module.params['state'] - parent_identifier = module.params['parent'] - group_visibility = module.params['visibility'] - project_creation_level = module.params['project_creation_level'] - auto_devops_enabled = module.params['auto_devops_enabled'] - subgroup_creation_level = module.params['subgroup_creation_level'] - require_two_factor_authentication = module.params['require_two_factor_authentication'] - avatar_path = module.params['avatar_path'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - - # Define default group_path based on group_name - if group_path is None: - group_path = group_name.replace(" ", "_") - - gitlab_group = GitLabGroup(module, gitlab_instance) - - parent_group = None - if parent_identifier: - parent_group = find_group(gitlab_instance, parent_identifier) - if not parent_group: - module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists") - - group_exists = gitlab_group.exists_group(parent_group.full_path + '/' + group_path) - else: - group_exists = gitlab_group.exists_group(group_path) - - if state == 'absent': - if group_exists: - gitlab_group.delete_group() - module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name) - else: - module.exit_json(changed=False, msg="Group deleted or does not exists") - - if state == 'present': - if gitlab_group.create_or_update_group(group_name, parent_group, { - "path": group_path, - "description": description, - "visibility": group_visibility, - "project_creation_level": project_creation_level, - "auto_devops_enabled": auto_devops_enabled, - "subgroup_creation_level": subgroup_creation_level, - "require_two_factor_authentication": require_two_factor_authentication, - "avatar_path": avatar_path, - }): - module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs) - else: - module.exit_json(changed=False, msg="No need to update the group %s" % group_name, 
group=gitlab_group.group_object._attrs) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py deleted file mode 100644 index 4b8a7506..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py +++ /dev/null @@ -1,450 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2020, Zainab Alsaffar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: gitlab_group_members -short_description: Manage group members on GitLab Server -description: - - This module allows to add and remove members to/from a group, or change a member's access level in a group on GitLab. -version_added: '1.2.0' -author: Zainab Alsaffar (@zanssa) -requirements: - - python-gitlab python module <= 1.15.0 - - administrator rights on the GitLab server -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - gitlab_group: - description: - - The C(full_path) of the GitLab group the member is added to/removed from. - - Setting this to C(name) or C(path) is deprecated and will be removed in community.general 6.0.0. Use C(full_path) instead. - required: true - type: str - gitlab_user: - description: - - A username or a list of usernames to add to/remove from the GitLab group. - - Mutually exclusive with I(gitlab_users_access). - type: list - elements: str - access_level: - description: - - The access level for the user. - - Required if I(state=present), user state is set to present. - - Mutually exclusive with I(gitlab_users_access). - type: str - choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] - gitlab_users_access: - description: - - Provide a list of user to access level mappings. - - Every dictionary in this list specifies a user (by username) and the access level the user should have. - - Mutually exclusive with I(gitlab_user) and I(access_level). - - Use together with I(purge_users) to remove all users not specified here from the group. - type: list - elements: dict - suboptions: - name: - description: A username or a list of usernames to add to/remove from the GitLab group. - type: str - required: true - access_level: - description: - - The access level for the user. - - Required if I(state=present), user state is set to present. - type: str - choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] - required: true - version_added: 3.6.0 - state: - description: - - State of the member in the group. - - On C(present), it adds a user to a GitLab group. - - On C(absent), it removes a user from a GitLab group. - choices: ['present', 'absent'] - default: 'present' - type: str - purge_users: - description: - - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. - If omitted do not purge orphaned members. - - Is only used when I(state=present). - type: list - elements: str - choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] - version_added: 3.6.0 -notes: - - Supports C(check_mode). 
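Before the examples, a quick look at the underlying python-gitlab calls: usernames are resolved to numeric ids via users.list(), and membership is created on the group with an access-level constant. A sketch with placeholder URL, token, group and username; DEVELOPER_ACCESS is a module-level constant in the python-gitlab versions this module targets:

import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="REDACTED")

# users.list(username=...) filters on the exact username.
user = gl.users.list(username="some_user")[0]
group = gl.groups.get("groupname")
group.members.create({"user_id": user.id,
                      "access_level": gitlab.DEVELOPER_ACCESS})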
-'''
-
-EXAMPLES = r'''
-- name: Add a user to a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_user: username
-    access_level: developer
-    state: present
-
-- name: Remove a user from a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_user: username
-    state: absent
-
-- name: Add a list of Users to a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_user:
-      - user1
-      - user2
-    access_level: developer
-    state: present
-
-- name: Add a list of Users with Dedicated Access Levels to a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_users_access:
-      - name: user1
-        access_level: developer
-      - name: user2
-        access_level: maintainer
-    state: present
-
-- name: Add a user, remove all others which might be on this access level
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_user: username
-    access_level: developer
-    purge_users: developer
-    state: present
-
-- name: Remove a list of Users with Dedicated Access Levels to a GitLab Group
-  community.general.gitlab_group_members:
-    api_url: 'https://gitlab.example.com'
-    api_token: 'Your-Private-Token'
-    gitlab_group: groupname
-    gitlab_users_access:
-      - name: user1
-        access_level: developer
-      - name: user2
        access_level: maintainer
-    state: absent
-'''
-
-RETURN = r''' # '''
-
-from ansible.module_utils.api import basic_auth_argument_spec
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication
-
-import traceback
-
-try:
-    import gitlab
-    HAS_PY_GITLAB = True
-except ImportError:
-    GITLAB_IMP_ERR = traceback.format_exc()
-    HAS_PY_GITLAB = False
-
-
-class GitLabGroup(object):
-    def __init__(self, module, gl):
-        self._module = module
-        self._gitlab = gl
-
-    # get user id if the user exists
-    def get_user_id(self, gitlab_user):
-        user_exists = self._gitlab.users.list(username=gitlab_user)
-        if user_exists:
-            return user_exists[0].id
-
-    # get group id if group exists
-    def get_group_id(self, gitlab_group):
-        groups = self._gitlab.groups.list(search=gitlab_group)
-        for group in groups:
-            if group.full_path == gitlab_group:
-                return group.id
-        for group in groups:
-            if group.path == gitlab_group or group.name == gitlab_group:
-                self._module.deprecate(
-                    msg="Setting 'gitlab_group' to 'name' or 'path' is deprecated.
Use 'full_path' instead", - version="6.0.0", collection_name="community.general") - return group.id - - # get all members in a group - def get_members_in_a_group(self, gitlab_group_id): - group = self._gitlab.groups.get(gitlab_group_id) - return group.members.list(all=True) - - # get single member in a group by user name - def get_member_in_a_group(self, gitlab_group_id, gitlab_user_id): - member = None - group = self._gitlab.groups.get(gitlab_group_id) - try: - member = group.members.get(gitlab_user_id) - if member: - return member - except gitlab.exceptions.GitlabGetError as e: - return None - - # check if the user is a member of the group - def is_user_a_member(self, members, gitlab_user_id): - for member in members: - if member.id == gitlab_user_id: - return True - return False - - # add user to a group - def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level): - group = self._gitlab.groups.get(gitlab_group_id) - add_member = group.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) - - # remove user from a group - def remove_user_from_group(self, gitlab_user_id, gitlab_group_id): - group = self._gitlab.groups.get(gitlab_group_id) - group.members.delete(gitlab_user_id) - - # get user's access level - def get_user_access_level(self, members, gitlab_user_id): - for member in members: - if member.id == gitlab_user_id: - return member.access_level - - # update user's access level in a group - def update_user_access_level(self, members, gitlab_user_id, access_level): - for member in members: - if member.id == gitlab_user_id: - member.access_level = access_level - member.save() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - gitlab_group=dict(type='str', required=True), - gitlab_user=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), - purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), - gitlab_users_access=dict( - type='list', - elements='dict', - options=dict( - name=dict(type='str', required=True), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), - ) - ), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['gitlab_user', 'gitlab_users_access'], - ['access_level', 'gitlab_users_access'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ['gitlab_user', 'access_level'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], - ['gitlab_user', 'gitlab_users_access'], - ], - required_if=[ - ['state', 'present', ['access_level', 'gitlab_users_access'], True], - ], - supports_check_mode=True, - ) - - if not HAS_PY_GITLAB: - module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) - - access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - 'owner': gitlab.OWNER_ACCESS, - } - - gitlab_group = 
module.params['gitlab_group'] - state = module.params['state'] - access_level = module.params['access_level'] - purge_users = module.params['purge_users'] - - if purge_users: - purge_users = [access_level_int[level] for level in purge_users] - - # connect to gitlab server - gl = gitlab_authentication(module) - - group = GitLabGroup(module, gl) - - gitlab_group_id = group.get_group_id(gitlab_group) - - # group doesn't exist - if not gitlab_group_id: - module.fail_json(msg="group '%s' not found." % gitlab_group) - - members = [] - if module.params['gitlab_user'] is not None: - gitlab_users_access = [] - gitlab_users = module.params['gitlab_user'] - for gl_user in gitlab_users: - gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) - elif module.params['gitlab_users_access'] is not None: - gitlab_users_access = module.params['gitlab_users_access'] - for user_level in gitlab_users_access: - user_level['access_level'] = access_level_int[user_level['access_level']] - - if len(gitlab_users_access) == 1 and not purge_users: - # only single user given - members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))] - if members[0] is None: - members = [] - elif len(gitlab_users_access) > 1 or purge_users: - # list of users given - members = group.get_members_in_a_group(gitlab_group_id) - else: - module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", - result_data=[]) - - changed = False - error = False - changed_users = [] - changed_data = [] - - for gitlab_user in gitlab_users_access: - gitlab_user_id = group.get_user_id(gitlab_user['name']) - - # user doesn't exist - if not gitlab_user_id: - if state == 'absent': - changed_users.append("user '%s' not found, and thus also not part of the group" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "user '%s' not found, and thus also not part of the group" % gitlab_user['name']}) - else: - error = True - changed_users.append("user '%s' not found." % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "user '%s' not found." % gitlab_user['name']}) - continue - - is_user_a_member = group.is_user_a_member(members, gitlab_user_id) - - # check if the user is a member in the group - if not is_user_a_member: - if state == 'present': - # add user to the group - try: - if not module.check_mode: - group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level']) - changed = True - changed_users.append("Successfully added user '%s' to group" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully added user '%s' to group" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabCreateError) as e: - error = True - changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)}) - # state as absent - else: - changed_users.append("User, '%s', is not a member in the group. No change to report" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "User, '%s', is not a member in the group. 
No change to report" % gitlab_user['name']}) - # in case that a user is a member - else: - if state == 'present': - # compare the access level - user_access_level = group.get_user_access_level(members, gitlab_user_id) - if user_access_level == gitlab_user['access_level']: - changed_users.append("User, '%s', is already a member in the group. No change to report" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "User, '%s', is already a member in the group. No change to report" % gitlab_user['name']}) - else: - # update the access level for the user - try: - if not module.check_mode: - group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) - changed = True - changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabUpdateError) as e: - error = True - changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)}) - else: - # remove the user from the group - try: - if not module.check_mode: - group.remove_user_from_group(gitlab_user_id, gitlab_group_id) - changed = True - changed_users.append("Successfully removed user, '%s', from the group" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully removed user, '%s', from the group" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabDeleteError) as e: - error = True - changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)}) - - # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users - if state == 'present' and purge_users: - uppercase_names_in_gitlab_users_access = [] - for name in gitlab_users_access: - uppercase_names_in_gitlab_users_access.append(name['name'].upper()) - - for member in members: - if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access: - try: - if not module.check_mode: - group.remove_user_from_group(member.id, gitlab_group_id) - changed = True - changed_users.append("Successfully removed user '%s', from group. Was not in given list" % member.username) - changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', - 'msg': "Successfully removed user '%s', from group. 
Was not in given list" % member.username})
-                except (gitlab.exceptions.GitlabDeleteError) as e:
-                    error = True
-                    changed_users.append("Failed to remove user, '%s', from the group" % gitlab_user['name'])
-                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
-                                         'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)})
-
-    if len(gitlab_users_access) == 1 and error:
-        # if a single user was given and an error occurred, return the error; for lists, errors are reported per user
-        module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data)
-    elif error:
-        module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
-
-    module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py
deleted file mode 100644
index 9be3a3ab..00000000
--- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py
+++ /dev/null
@@ -1,464 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Florent Madiot (scodeman@scode.io)
-# Based on code:
-# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
-module: gitlab_group_variable
-short_description: Creates, updates, or deletes GitLab group variables
-version_added: 1.2.0
-description:
-  - Creates a group variable if it does not exist.
-  - When a group variable does exist, its value will be updated when the values are different.
-  - Variables which exist in the GitLab group but are not listed in the playbook stay untouched
    (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
-author:
-  - Florent Madiot (@scodeman)
-requirements:
-  - python >= 2.7
-  - python-gitlab python module
-extends_documentation_fragment:
-  - community.general.auth_basic
-  - community.general.gitlab
-
-options:
-  state:
-    description:
-      - Create or delete group variable.
-    default: present
-    type: str
-    choices: ["present", "absent"]
-  group:
-    description:
-      - The path and name of the group.
-    required: true
-    type: str
-  purge:
-    description:
-      - When set to C(true), delete all variables which are not listed in the task.
-    default: false
-    type: bool
-  vars:
-    description:
-      - When the list element is a simple key-value pair, set masked and protected to false.
-      - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
        have full control about whether a value should be masked, protected or both.
-      - Support for group variables requires GitLab >= 9.5.
-      - Support for environment_scope requires GitLab Premium >= 13.11.
-      - Support for protected values requires GitLab >= 9.3.
-      - Support for masked values requires GitLab >= 11.10.
-      - A I(value) must be a string or a number.
-      - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
-      - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
-      - See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
-    default: {}
-    type: dict
-  variables:
-    version_added: 4.5.0
-    description:
-      - A list of dictionaries that represents CI/CD variables.
-      - This module works internally with this structure, even if the older I(vars) parameter is used.
-    default: []
-    type: list
-    elements: dict
-    suboptions:
-      name:
-        description:
-          - The name of the variable.
-        type: str
-        required: true
-      value:
-        description:
-          - The variable value.
-          - Required when I(state=present).
-        type: str
-      masked:
-        description:
-          - Whether the variable value is masked or not.
-        type: bool
-        default: false
-      protected:
-        description:
-          - Whether the variable value is protected or not.
-        type: bool
-        default: false
-      variable_type:
-        description:
-          - Whether a variable is an environment variable (C(env_var)) or a file (C(file)).
-        type: str
-        choices: [ "env_var", "file" ]
-        default: env_var
-      environment_scope:
-        description:
-          - The scope for the variable.
-        type: str
-        default: '*'
-notes:
-- Supports I(check_mode).
-'''
-
-
-EXAMPLES = r'''
-- name: Set or update some CI/CD variables
-  community.general.gitlab_group_variable:
-    api_url: https://gitlab.com
-    api_token: secret_access_token
-    group: scodeman/testgroup/
-    purge: false
-    variables:
-      - name: ACCESS_KEY_ID
-        value: abc123
-      - name: SECRET_ACCESS_KEY
-        value: 3214cbad
-        masked: true
-        protected: true
-        variable_type: env_var
-        environment_scope: production
-
-- name: Delete one variable
-  community.general.gitlab_group_variable:
-    api_url: https://gitlab.com
-    api_token: secret_access_token
-    group: scodeman/testgroup/
-    state: absent
-    vars:
-      ACCESS_KEY_ID: abc123
-'''
-
-RETURN = r'''
-group_variable:
-  description: Four lists of the variable names which were added, updated, removed or exist.
-  returned: always
-  type: dict
-  contains:
-    added:
-      description: A list of variables which were created.
-      returned: always
-      type: list
-      sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
-    untouched:
-      description: A list of variables which exist.
-      returned: always
-      type: list
-      sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
-    removed:
-      description: A list of variables which were deleted.
-      returned: always
-      type: list
-      sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
-    updated:
-      description: A list of variables whose values were changed.
- returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication - - -def vars_to_variables(vars, module): - # transform old vars to new variables structure - variables = list() - for item, value in vars.items(): - if (isinstance(value, string_types) or - isinstance(value, (integer_types, float))): - variables.append( - { - "name": item, - "value": str(value), - "masked": False, - "protected": False, - "variable_type": "env_var", - } - ) - - elif isinstance(value, dict): - new_item = {"name": item, "value": value.get('value')} - - new_item = { - "name": item, - "value": value.get('value'), - "masked": value.get('masked'), - "protected": value.get('protected'), - "variable_type": value.get('variable_type'), - } - - if value.get('environment_scope'): - new_item['environment_scope'] = value.get('environment_scope') - - variables.append(new_item) - - else: - module.fail_json(msg="value must be of type string, integer, float or dict") - - return variables - - -class GitlabGroupVariables(object): - - def __init__(self, module, gitlab_instance): - self.repo = gitlab_instance - self.group = self.get_group(module.params['group']) - self._module = module - - def get_group(self, group_name): - return self.repo.groups.get(group_name) - - def list_all_group_variables(self): - page_nb = 1 - variables = [] - vars_page = self.group.variables.list(page=page_nb) - while len(vars_page) > 0: - variables += vars_page - page_nb += 1 - vars_page = self.group.variables.list(page=page_nb) - return variables - - def create_variable(self, var_obj): - if self._module.check_mode: - return True - var = { - "key": var_obj.get('key'), - "value": var_obj.get('value'), - "masked": var_obj.get('masked'), - "protected": var_obj.get('protected'), - "variable_type": var_obj.get('variable_type'), - } - if var_obj.get('environment_scope') is not None: - var["environment_scope"] = var_obj.get('environment_scope') - - self.group.variables.create(var) - return True - - def update_variable(self, var_obj): - if self._module.check_mode: - return True - self.delete_variable(var_obj) - self.create_variable(var_obj) - return True - - def delete_variable(self, var_obj): - if self._module.check_mode: - return True - self.group.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')}) - return True - - -def compare(requested_variables, existing_variables, state): - # we need to do this, because it was determined in a previous version - more or less buggy - # basically it is not necessary and might results in more/other bugs! - # but it is required and only relevant for check mode!! - # logic represents state 'present' when not purge. 
all other can be derived from that - # untouched => equal in both - # updated => name and scope are equal - # added => name and scope does not exist - untouched = list() - updated = list() - added = list() - - if state == 'present': - existing_key_scope_vars = list() - for item in existing_variables: - existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')}) - - for var in requested_variables: - if var in existing_variables: - untouched.append(var) - else: - compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')} - if compare_item in existing_key_scope_vars: - updated.append(var) - else: - added.append(var) - - return untouched, updated, added - - -def native_python_main(this_gitlab, purge, requested_variables, state, module): - - change = False - return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) - - gitlab_keys = this_gitlab.list_all_group_variables() - before = [x.attributes for x in gitlab_keys] - - gitlab_keys = this_gitlab.list_all_group_variables() - existing_variables = [x.attributes for x in gitlab_keys] - - # preprocessing:filter out and enrich before compare - for item in existing_variables: - item.pop('group_id') - - for item in requested_variables: - item['key'] = item.pop('name') - item['value'] = str(item.get('value')) - if item.get('protected') is None: - item['protected'] = False - if item.get('masked') is None: - item['masked'] = False - if item.get('environment_scope') is None: - item['environment_scope'] = '*' - if item.get('variable_type') is None: - item['variable_type'] = 'env_var' - - if module.check_mode: - untouched, updated, added = compare(requested_variables, existing_variables, state) - - if state == 'present': - add_or_update = [x for x in requested_variables if x not in existing_variables] - for item in add_or_update: - try: - if this_gitlab.create_variable(item): - return_value['added'].append(item) - - except Exception: - if this_gitlab.update_variable(item): - return_value['updated'].append(item) - - if purge: - # refetch and filter - gitlab_keys = this_gitlab.list_all_group_variables() - existing_variables = [x.attributes for x in gitlab_keys] - for item in existing_variables: - item.pop('group_id') - - remove = [x for x in existing_variables if x not in requested_variables] - for item in remove: - if this_gitlab.delete_variable(item): - return_value['removed'].append(item) - - elif state == 'absent': - # value does not matter on removing variables. 
- # key and environment scope are sufficient - for item in existing_variables: - item.pop('value') - item.pop('variable_type') - for item in requested_variables: - item.pop('value') - item.pop('variable_type') - - if not purge: - remove_requested = [x for x in requested_variables if x in existing_variables] - for item in remove_requested: - if this_gitlab.delete_variable(item): - return_value['removed'].append(item) - - else: - for item in existing_variables: - if this_gitlab.delete_variable(item): - return_value['removed'].append(item) - - if module.check_mode: - return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) - - if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0: - change = True - - gitlab_keys = this_gitlab.list_all_group_variables() - after = [x.attributes for x in gitlab_keys] - - return change, return_value, before, after - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update( - group=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( - name=dict(type='str', required=True), - value=dict(type='str', no_log=True), - masked=dict(type='bool', default=False), - protected=dict(type='bool', default=False), - environment_scope=dict(type='str', default='*'), - variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) - )), - state=dict(type='str', default="present", choices=["absent", "present"]), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['vars', 'variables'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] - ], - supports_check_mode=True - ) - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - purge = module.params['purge'] - var_list = module.params['vars'] - state = module.params['state'] - - if var_list: - variables = vars_to_variables(var_list, module) - else: - variables = module.params['variables'] - - if state == 'present': - if any(x['value'] is None for x in variables): - module.fail_json(msg='value parameter is required in state present') - - gitlab_instance = gitlab_authentication(module) - - this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance) - - changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) - - # postprocessing - for item in after: - item.pop('group_id') - item['name'] = item.pop('key') - for item in before: - item.pop('group_id') - item['name'] = item.pop('key') - - untouched_key_name = 'key' - if not module.check_mode: - untouched_key_name = 'name' - raw_return_value['untouched'] = [x for x in before if x in after] - - added = [x.get('key') for x in raw_return_value['added']] - updated = [x.get('key') for x in raw_return_value['updated']] - removed = [x.get('key') for x in raw_return_value['removed']] - untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] - return_value = 
dict(added=added, updated=updated, removed=removed, untouched=untouched) - - module.exit_json(changed=changed, group_variable=return_value) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py deleted file mode 100644 index 1fb03342..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py +++ /dev/null @@ -1,390 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Marcus Watkins -# Based on code: -# Copyright: (c) 2013, Phillip Gentry -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: gitlab_hook -short_description: Manages GitLab project hooks. -description: - - Adds, updates and removes project hook -author: - - Marcus Watkins (@marwatk) - - Guillaume Martinez (@Lunik) -requirements: - - python >= 2.7 - - python-gitlab python module -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - project: - description: - - Id or Full path of the project in the form of group/name. - required: true - type: str - hook_url: - description: - - The url that you want GitLab to post to, this is used as the primary key for updates and deletion. - required: true - type: str - state: - description: - - When C(present) the hook will be updated to match the input or created if it doesn't exist. - - When C(absent) hook will be deleted if it exists. - default: present - type: str - choices: [ "present", "absent" ] - push_events: - description: - - Trigger hook on push events. - type: bool - default: yes - push_events_branch_filter: - description: - - Branch name of wildcard to trigger hook on push events - type: str - version_added: '0.2.0' - issues_events: - description: - - Trigger hook on issues events. - type: bool - default: no - merge_requests_events: - description: - - Trigger hook on merge requests events. - type: bool - default: no - tag_push_events: - description: - - Trigger hook on tag push events. - type: bool - default: no - note_events: - description: - - Trigger hook on note events or when someone adds a comment. - type: bool - default: no - job_events: - description: - - Trigger hook on job events. - type: bool - default: no - pipeline_events: - description: - - Trigger hook on pipeline events. - type: bool - default: no - wiki_page_events: - description: - - Trigger hook on wiki events. - type: bool - default: no - hook_validate_certs: - description: - - Whether GitLab will do SSL verification when triggering the hook. - type: bool - default: no - aliases: [ enable_ssl_verification ] - token: - description: - - Secret token to validate hook messages at the receiver. - - If this is present it will always result in a change as it cannot be retrieved from GitLab. - - Will show up in the X-GitLab-Token HTTP request header. 
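On the receiving end, that header is what a hook consumer would check before trusting a delivery. A minimal stdlib sketch, assuming a hypothetical shared secret and port:

```python
from http.server import BaseHTTPRequestHandler, HTTPServer

EXPECTED = 'my-super-secret-token'  # hypothetical; the value given to the module's `token` option

class HookHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        # GitLab sends the configured secret in X-Gitlab-Token on every delivery.
        if self.headers.get('X-Gitlab-Token') != EXPECTED:
            self.send_response(403)
        else:
            self.send_response(200)
        self.end_headers()

if __name__ == '__main__':
    HTTPServer(('127.0.0.1', 8000), HookHandler).serve_forever()
```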
- required: false - type: str -''' - -EXAMPLES = ''' -- name: "Adding a project hook" - community.general.gitlab_hook: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - project: "my_group/my_project" - hook_url: "https://my-ci-server.example.com/gitlab-hook" - state: present - push_events: yes - tag_push_events: yes - hook_validate_certs: no - token: "my-super-secret-token-that-my-ci-server-will-check" - -- name: "Delete the previous hook" - community.general.gitlab_hook: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - project: "my_group/my_project" - hook_url: "https://my-ci-server.example.com/gitlab-hook" - state: absent - -- name: "Delete a hook by numeric project id" - community.general.gitlab_hook: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - project: 10 - hook_url: "https://my-ci-server.example.com/gitlab-hook" - state: absent -''' - -RETURN = ''' -msg: - description: Success or failure message - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server - returned: always - type: dict - -error: - description: the error message returned by the GitLab API - returned: failed - type: str - sample: "400: path is already in use" - -hook: - description: API object - returned: always - type: dict -''' - -import re -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_project, gitlab_authentication - - -class GitLabHook(object): - def __init__(self, module, gitlab_instance): - self._module = module - self._gitlab = gitlab_instance - self.hook_object = None - - ''' - @param project Project Object - @param hook_url Url to call on event - @param description Description of the group - @param parent Parent group full path - ''' - def create_or_update_hook(self, project, hook_url, options): - changed = False - - # Because we have already call userExists in main() - if self.hook_object is None: - hook = self.create_hook(project, { - 'url': hook_url, - 'push_events': options['push_events'], - 'push_events_branch_filter': options['push_events_branch_filter'], - 'issues_events': options['issues_events'], - 'merge_requests_events': options['merge_requests_events'], - 'tag_push_events': options['tag_push_events'], - 'note_events': options['note_events'], - 'job_events': options['job_events'], - 'pipeline_events': options['pipeline_events'], - 'wiki_page_events': options['wiki_page_events'], - 'enable_ssl_verification': options['enable_ssl_verification'], - 'token': options['token'], - }) - changed = True - else: - changed, hook = self.update_hook(self.hook_object, { - 'push_events': options['push_events'], - 'push_events_branch_filter': options['push_events_branch_filter'], - 'issues_events': options['issues_events'], - 'merge_requests_events': options['merge_requests_events'], - 'tag_push_events': options['tag_push_events'], - 'note_events': options['note_events'], - 'job_events': options['job_events'], - 'pipeline_events': options['pipeline_events'], - 'wiki_page_events': options['wiki_page_events'], - 'enable_ssl_verification': 
options['enable_ssl_verification'], - 'token': options['token'], - }) - - self.hook_object = hook - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url) - - try: - hook.save() - except Exception as e: - self._module.fail_json(msg="Failed to update hook: %s " % e) - return True - else: - return False - - ''' - @param project Project Object - @param arguments Attributes of the hook - ''' - def create_hook(self, project, arguments): - if self._module.check_mode: - return True - - hook = project.hooks.create(arguments) - - return hook - - ''' - @param hook Hook Object - @param arguments Attributes of the hook - ''' - def update_hook(self, hook, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(hook, arg_key) != arguments[arg_key]: - setattr(hook, arg_key, arguments[arg_key]) - changed = True - - return (changed, hook) - - ''' - @param project Project object - @param hook_url Url to call on event - ''' - def find_hook(self, project, hook_url): - hooks = project.hooks.list() - for hook in hooks: - if (hook.url == hook_url): - return hook - - ''' - @param project Project object - @param hook_url Url to call on event - ''' - def exists_hook(self, project, hook_url): - # When project exists, object will be stored in self.project_object. - hook = self.find_hook(project, hook_url) - if hook: - self.hook_object = hook - return True - return False - - def delete_hook(self): - if self._module.check_mode: - return True - - return self.hook_object.delete() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default="present", choices=["absent", "present"]), - project=dict(type='str', required=True), - hook_url=dict(type='str', required=True), - push_events=dict(type='bool', default=True), - push_events_branch_filter=dict(type='str', default=''), - issues_events=dict(type='bool', default=False), - merge_requests_events=dict(type='bool', default=False), - tag_push_events=dict(type='bool', default=False), - note_events=dict(type='bool', default=False), - job_events=dict(type='bool', default=False), - pipeline_events=dict(type='bool', default=False), - wiki_page_events=dict(type='bool', default=False), - hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), - token=dict(type='str', no_log=True), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ], - required_together=[ - ['api_username', 'api_password'] - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] - ], - supports_check_mode=True, - ) - - state = module.params['state'] - project_identifier = module.params['project'] - hook_url = module.params['hook_url'] - push_events = module.params['push_events'] - push_events_branch_filter = module.params['push_events_branch_filter'] - issues_events = module.params['issues_events'] - merge_requests_events = module.params['merge_requests_events'] - tag_push_events = module.params['tag_push_events'] - note_events = module.params['note_events'] - job_events = module.params['job_events'] - pipeline_events = module.params['pipeline_events'] - wiki_page_events 
= module.params['wiki_page_events'] - enable_ssl_verification = module.params['hook_validate_certs'] - hook_token = module.params['token'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - - gitlab_hook = GitLabHook(module, gitlab_instance) - - project = find_project(gitlab_instance, project_identifier) - - if project is None: - module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier) - - hook_exists = gitlab_hook.exists_hook(project, hook_url) - - if state == 'absent': - if hook_exists: - gitlab_hook.delete_hook() - module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url) - else: - module.exit_json(changed=False, msg="Hook deleted or does not exists") - - if state == 'present': - if gitlab_hook.create_or_update_hook(project, hook_url, { - "push_events": push_events, - "push_events_branch_filter": push_events_branch_filter, - "issues_events": issues_events, - "merge_requests_events": merge_requests_events, - "tag_push_events": tag_push_events, - "note_events": note_events, - "job_events": job_events, - "pipeline_events": pipeline_events, - "wiki_page_events": wiki_page_events, - "enable_ssl_verification": enable_ssl_verification, - "token": hook_token, - }): - - module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hook_object._attrs) - else: - module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hook_object._attrs) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py deleted file mode 100644 index 907757c4..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py +++ /dev/null @@ -1,551 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: gitlab_project -short_description: Creates/updates/deletes GitLab Projects -description: - - When the project does not exist in GitLab, it will be created. - - When the project does exists and I(state=absent), the project will be deleted. - - When changes are made to the project, the project will be updated. -author: - - Werner Dijkerman (@dj-wasabi) - - Guillaume Martinez (@Lunik) -requirements: - - python >= 2.7 - - python-gitlab python module -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - group: - description: - - Id or the full path of the group of which this projects belongs to. - type: str - name: - description: - - The name of the project. - required: true - type: str - path: - description: - - The path of the project you want to create, this will be server_url//path. - - If not supplied, name will be used. - type: str - description: - description: - - An description for the project. - type: str - initialize_with_readme: - description: - - Will initialize the project with a default C(README.md). 
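For orientation, the raw python-gitlab call that this module wraps for creation is roughly the following; the token and group id are hypothetical, and the module adds lookup, update and check-mode handling on top:

```python
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='glpat-...')  # hypothetical token
project = gl.projects.create({
    'name': 'my_first_project',
    'namespace_id': 10481470,        # hypothetical group id
    'initialize_with_readme': True,  # only honoured at creation time
    'visibility': 'private',
})
print(project.web_url)
```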
- - Is only used when the project is created, and ignored otherwise. - type: bool - default: false - version_added: "4.0.0" - issues_enabled: - description: - - Whether you want to create issues or not. - - Possible values are true and false. - type: bool - default: yes - merge_requests_enabled: - description: - - If merge requests can be made or not. - - Possible values are true and false. - type: bool - default: yes - wiki_enabled: - description: - - If an wiki for this project should be available or not. - type: bool - default: yes - snippets_enabled: - description: - - If creating snippets should be available or not. - type: bool - default: yes - visibility: - description: - - C(private) Project access must be granted explicitly for each user. - - C(internal) The project can be cloned by any logged in user. - - C(public) The project can be cloned without any authentication. - default: private - type: str - choices: ["private", "internal", "public"] - aliases: - - visibility_level - import_url: - description: - - Git repository which will be imported into gitlab. - - GitLab server needs read access to this git repository. - required: false - type: str - state: - description: - - Create or delete project. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - merge_method: - description: - - What requirements are placed upon merges. - - Possible values are C(merge), C(rebase_merge) merge commit with semi-linear history, C(ff) fast-forward merges only. - type: str - choices: ["ff", "merge", "rebase_merge"] - default: merge - version_added: "1.0.0" - lfs_enabled: - description: - - Enable Git large file systems to manages large files such - as audio, video, and graphics files. - type: bool - required: false - default: false - version_added: "2.0.0" - username: - description: - - Used to create a personal project under a user's name. - type: str - version_added: "3.3.0" - allow_merge_on_skipped_pipeline: - description: - - Allow merge when skipped pipelines exist. - type: bool - version_added: "3.4.0" - only_allow_merge_if_all_discussions_are_resolved: - description: - - All discussions on a merge request (MR) have to be resolved. - type: bool - version_added: "3.4.0" - only_allow_merge_if_pipeline_succeeds: - description: - - Only allow merges if pipeline succeeded. - type: bool - version_added: "3.4.0" - packages_enabled: - description: - - Enable GitLab package repository. - type: bool - version_added: "3.4.0" - remove_source_branch_after_merge: - description: - - Remove the source branch after merge. - type: bool - version_added: "3.4.0" - squash_option: - description: - - Squash commits when merging. - type: str - choices: ["never", "always", "default_off", "default_on"] - version_added: "3.4.0" - ci_config_path: - description: - - Custom path to the CI configuration file for this project. - type: str - version_added: "3.7.0" - shared_runners_enabled: - description: - - Enable shared runners for this project. - type: bool - version_added: "3.7.0" - avatar_path: - description: - - Absolute path image to configure avatar. File size should not exceed 200 kb. - - This option is only used on creation, not for updates. - type: path - version_added: "4.2.0" - default_branch: - description: - - Default branch name for a new project. - - This option is only used on creation, not for updates. This is also only used if I(initialize_with_readme=true). 
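The module rejects I(default_branch) without I(initialize_with_readme) up front (see the check in main() further down). The guard in isolation:

```python
def validate(params):
    # default_branch is only applied at creation and only when a README
    # is generated, so this combination is rejected early.
    if params.get('default_branch') and not params.get('initialize_with_readme'):
        raise ValueError('default_branch requires initialize_with_readme=true')

validate({'default_branch': 'main', 'initialize_with_readme': True})  # passes
```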
- type: str - version_added: "4.2.0" -''' - -EXAMPLES = r''' -- name: Create GitLab Project - community.general.gitlab_project: - api_url: https://gitlab.example.com/ - api_token: "{{ api_token }}" - name: my_first_project - group: "10481470" - -- name: Delete GitLab Project - community.general.gitlab_project: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - name: my_first_project - state: absent - delegate_to: localhost - -- name: Create GitLab Project in group Ansible - community.general.gitlab_project: - api_url: https://gitlab.example.com/ - validate_certs: True - api_username: dj-wasabi - api_password: "MySecretPassword" - name: my_first_project - group: ansible - issues_enabled: False - merge_method: rebase_merge - wiki_enabled: True - snippets_enabled: True - import_url: http://git.example.com/example/lab.git - initialize_with_readme: true - state: present - delegate_to: localhost - -- name: get the initial root password - ansible.builtin.shell: | - grep 'Password:' /etc/gitlab/initial_root_password | sed -e 's/Password\: \(.*\)/\1/' - register: initial_root_password - -- name: Create a GitLab Project using a username/password via oauth_token - community.general.gitlab_project: - api_url: https://gitlab.example.com/ - api_username: root - api_password: "{{ initial_root_password }}" - name: my_second_project - group: "10481470" -''' - -RETURN = r''' -msg: - description: Success or failure message. - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server. - returned: always - type: dict - -error: - description: the error message returned by the GitLab API. - returned: failed - type: str - sample: "400: path is already in use" - -project: - description: API object. 
- returned: always - type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_group, find_project, gitlab_authentication - - -class GitLabProject(object): - def __init__(self, module, gitlab_instance): - self._module = module - self._gitlab = gitlab_instance - self.project_object = None - - ''' - @param project_name Name of the project - @param namespace Namespace Object (User or Group) - @param options Options of the project - ''' - def create_or_update_project(self, project_name, namespace, options): - changed = False - project_options = { - 'name': project_name, - 'description': options['description'], - 'issues_enabled': options['issues_enabled'], - 'merge_requests_enabled': options['merge_requests_enabled'], - 'merge_method': options['merge_method'], - 'wiki_enabled': options['wiki_enabled'], - 'snippets_enabled': options['snippets_enabled'], - 'visibility': options['visibility'], - 'lfs_enabled': options['lfs_enabled'], - 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], - 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], - 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], - 'packages_enabled': options['packages_enabled'], - 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], - 'squash_option': options['squash_option'], - 'ci_config_path': options['ci_config_path'], - 'shared_runners_enabled': options['shared_runners_enabled'], - } - # Because we have already call userExists in main() - if self.project_object is None: - project_options.update({ - 'path': options['path'], - 'import_url': options['import_url'], - }) - if options['initialize_with_readme']: - project_options['initialize_with_readme'] = options['initialize_with_readme'] - if options['default_branch']: - project_options['default_branch'] = options['default_branch'] - - project_options = self.get_options_with_value(project_options) - project = self.create_project(namespace, project_options) - - # add avatar to project - if options['avatar_path']: - try: - project.avatar = open(options['avatar_path'], 'rb') - except IOError as e: - self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e)) - - changed = True - else: - changed, project = self.update_project(self.project_object, project_options) - - self.project_object = project - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name) - - try: - project.save() - except Exception as e: - self._module.fail_json(msg="Failed update project: %s " % e) - return True - return False - - ''' - @param namespace Namespace Object (User or Group) - @param arguments Attributes of the project - ''' - def create_project(self, namespace, arguments): - if self._module.check_mode: - return True - - arguments['namespace_id'] = namespace.id - try: - project = self._gitlab.projects.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: - 
self._module.fail_json(msg="Failed to create project: %s " % to_native(e)) - - return project - - ''' - @param arguments Attributes of the project - ''' - def get_options_with_value(self, arguments): - ret_arguments = dict() - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - ret_arguments[arg_key] = arg_value - - return ret_arguments - - ''' - @param project Project Object - @param arguments Attributes of the project - ''' - def update_project(self, project, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(project, arg_key) != arguments[arg_key]: - setattr(project, arg_key, arguments[arg_key]) - changed = True - - return (changed, project) - - def delete_project(self): - if self._module.check_mode: - return True - - project = self.project_object - - return project.delete() - - ''' - @param namespace User/Group object - @param name Name of the project - ''' - def exists_project(self, namespace, path): - # When project exists, object will be stored in self.project_object. - project = find_project(self._gitlab, namespace.full_path + '/' + path) - if project: - self.project_object = project - return True - return False - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - group=dict(type='str'), - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - initialize_with_readme=dict(type='bool', default=False), - default_branch=dict(type='str'), - issues_enabled=dict(type='bool', default=True), - merge_requests_enabled=dict(type='bool', default=True), - merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), - wiki_enabled=dict(type='bool', default=True), - snippets_enabled=dict(default=True, type='bool'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), - import_url=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - lfs_enabled=dict(default=False, type='bool'), - username=dict(type='str'), - allow_merge_on_skipped_pipeline=dict(type='bool'), - only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), - only_allow_merge_if_pipeline_succeeds=dict(type='bool'), - packages_enabled=dict(type='bool'), - remove_source_branch_after_merge=dict(type='bool'), - squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), - ci_config_path=dict(type='str'), - shared_runners_enabled=dict(type='bool'), - avatar_path=dict(type='path'), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['group', 'username'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] - ], - supports_check_mode=True, - ) - - group_identifier = module.params['group'] - project_name = module.params['name'] - project_path = module.params['path'] - project_description = module.params['description'] - initialize_with_readme = module.params['initialize_with_readme'] - issues_enabled = module.params['issues_enabled'] - merge_requests_enabled = module.params['merge_requests_enabled'] - merge_method = 
module.params['merge_method'] - wiki_enabled = module.params['wiki_enabled'] - snippets_enabled = module.params['snippets_enabled'] - visibility = module.params['visibility'] - import_url = module.params['import_url'] - state = module.params['state'] - lfs_enabled = module.params['lfs_enabled'] - username = module.params['username'] - allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] - only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] - only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] - packages_enabled = module.params['packages_enabled'] - remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] - squash_option = module.params['squash_option'] - ci_config_path = module.params['ci_config_path'] - shared_runners_enabled = module.params['shared_runners_enabled'] - avatar_path = module.params['avatar_path'] - default_branch = module.params['default_branch'] - - if default_branch and not initialize_with_readme: - module.fail_json(msg="Param default_branch need param initialize_with_readme set to true") - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - - # Set project_path to project_name if it is empty. - if project_path is None: - project_path = project_name.replace(" ", "_") - - gitlab_project = GitLabProject(module, gitlab_instance) - - namespace = None - namespace_id = None - if group_identifier: - group = find_group(gitlab_instance, group_identifier) - if group is None: - module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier) - - namespace_id = group.id - else: - if username: - namespace = gitlab_instance.namespaces.list(search=username)[0] - else: - namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username)[0] - namespace_id = namespace.id - - if not namespace_id: - module.fail_json(msg="Failed to find the namespace or group ID which is required to look up the namespace") - - try: - namespace = gitlab_instance.namespaces.get(namespace_id) - except gitlab.exceptions.GitlabGetError as e: - module.fail_json(msg="Failed to find the namespace for the given user: %s" % to_native(e)) - - if not namespace: - module.fail_json(msg="Failed to find the namespace for the project") - project_exists = gitlab_project.exists_project(namespace, project_path) - - if state == 'absent': - if project_exists: - gitlab_project.delete_project() - module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name) - module.exit_json(changed=False, msg="Project deleted or does not exists") - - if state == 'present': - - if gitlab_project.create_or_update_project(project_name, namespace, { - "path": project_path, - "description": project_description, - "initialize_with_readme": initialize_with_readme, - "default_branch": default_branch, - "issues_enabled": issues_enabled, - "merge_requests_enabled": merge_requests_enabled, - "merge_method": merge_method, - "wiki_enabled": wiki_enabled, - "snippets_enabled": snippets_enabled, - "visibility": visibility, - "import_url": import_url, - "lfs_enabled": lfs_enabled, - "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, - "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, - "only_allow_merge_if_pipeline_succeeds": 
only_allow_merge_if_pipeline_succeeds, - "packages_enabled": packages_enabled, - "remove_source_branch_after_merge": remove_source_branch_after_merge, - "squash_option": squash_option, - "ci_config_path": ci_config_path, - "shared_runners_enabled": shared_runners_enabled, - "avatar_path": avatar_path, - }): - - module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) - module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.project_object._attrs) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_members.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_members.py deleted file mode 100644 index 97cbbdf6..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_members.py +++ /dev/null @@ -1,452 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Sergey Mikhaltsov -# Copyright: (c) 2020, Zainab Alsaffar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: gitlab_project_members -short_description: Manage project members on GitLab Server -version_added: 2.2.0 -description: - - This module allows to add and remove members to/from a project, or change a member's access level in a project on GitLab. -author: - - Sergey Mikhaltsov (@metanovii) - - Zainab Alsaffar (@zanssa) -requirements: - - python-gitlab python module <= 1.15.0 - - owner or maintainer rights to project on the GitLab server -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - project: - description: - - The name (or full path) of the GitLab project the member is added to/removed from. - required: true - type: str - gitlab_user: - description: - - A username or a list of usernames to add to/remove from the GitLab project. - - Mutually exclusive with I(gitlab_users_access). - type: list - elements: str - access_level: - description: - - The access level for the user. - - Required if I(state=present), user state is set to present. - type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] - gitlab_users_access: - description: - - Provide a list of user to access level mappings. - - Every dictionary in this list specifies a user (by username) and the access level the user should have. - - Mutually exclusive with I(gitlab_user) and I(access_level). - - Use together with I(purge_users) to remove all users not specified here from the project. - type: list - elements: dict - suboptions: - name: - description: A username or a list of usernames to add to/remove from the GitLab project. - type: str - required: true - access_level: - description: - - The access level for the user. - - Required if I(state=present), user state is set to present. - type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] - required: true - version_added: 3.7.0 - state: - description: - - State of the member in the project. - - On C(present), it adds a user to a GitLab project. - - On C(absent), it removes a user from a GitLab project. 
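Stripped of the module's idempotence logic, the two states map onto two python-gitlab calls; the ids and paths below are hypothetical:

```python
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='glpat-...')  # hypothetical
project = gl.projects.get('my_group/my_project')

# state=present with access_level=developer
project.members.create({'user_id': 42, 'access_level': gitlab.DEVELOPER_ACCESS})

# state=absent
project.members.delete(42)
```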
- choices: ['present', 'absent'] - default: 'present' - type: str - purge_users: - description: - - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. - If omitted do not purge orphaned members. - - Is only used when I(state=present). - type: list - elements: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] - version_added: 3.7.0 -notes: - - Supports C(check_mode). -''' - -EXAMPLES = r''' -- name: Add a user to a GitLab Project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - validate_certs: True - project: projectname - gitlab_user: username - access_level: developer - state: present - -- name: Remove a user from a GitLab project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - validate_certs: False - project: projectname - gitlab_user: username - state: absent - -- name: Add a list of Users to A GitLab project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - gitlab_project: projectname - gitlab_user: - - user1 - - user2 - access_level: developer - state: present - -- name: Add a list of Users with Dedicated Access Levels to A GitLab project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - project: projectname - gitlab_users_access: - - name: user1 - access_level: developer - - name: user2 - access_level: maintainer - state: present - -- name: Add a user, remove all others which might be on this access level - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - project: projectname - gitlab_user: username - access_level: developer - pruge_users: developer - state: present - -- name: Remove a list of Users with Dedicated Access Levels to A GitLab project - community.general.gitlab_project_members: - api_url: 'https://gitlab.example.com' - api_token: 'Your-Private-Token' - project: projectname - gitlab_users_access: - - name: user1 - access_level: developer - - name: user2 - access_level: maintainer - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication - -import traceback - -try: - import gitlab - HAS_PY_GITLAB = True -except ImportError: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_PY_GITLAB = False - - -class GitLabProjectMembers(object): - def __init__(self, module, gl): - self._module = module - self._gitlab = gl - - def get_project(self, project_name): - try: - project_exists = self._gitlab.projects.get(project_name) - return project_exists.id - except gitlab.exceptions.GitlabGetError as e: - project_exists = self._gitlab.projects.list(search=project_name) - if project_exists: - return project_exists[0].id - - def get_user_id(self, gitlab_user): - user_exists = self._gitlab.users.list(username=gitlab_user) - if user_exists: - return user_exists[0].id - - # get all members in a project - def get_members_in_a_project(self, gitlab_project_id): - project = self._gitlab.projects.get(gitlab_project_id) - return project.members.list(all=True) - - # get single member in a project by user name - def get_member_in_a_project(self, 
gitlab_project_id, gitlab_user_id): - member = None - project = self._gitlab.projects.get(gitlab_project_id) - try: - member = project.members.get(gitlab_user_id) - if member: - return member - except gitlab.exceptions.GitlabGetError as e: - return None - - # check if the user is a member of the project - def is_user_a_member(self, members, gitlab_user_id): - for member in members: - if member.id == gitlab_user_id: - return True - return False - - # add user to a project - def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level): - project = self._gitlab.projects.get(gitlab_project_id) - add_member = project.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) - - # remove user from a project - def remove_user_from_project(self, gitlab_user_id, gitlab_project_id): - project = self._gitlab.projects.get(gitlab_project_id) - project.members.delete(gitlab_user_id) - - # get user's access level - def get_user_access_level(self, members, gitlab_user_id): - for member in members: - if member.id == gitlab_user_id: - return member.access_level - - # update user's access level in a project - def update_user_access_level(self, members, gitlab_user_id, access_level): - for member in members: - if member.id == gitlab_user_id: - member.access_level = access_level - member.save() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - project=dict(type='str', required=True), - gitlab_user=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']), - purge_users=dict(type='list', elements='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer']), - gitlab_users_access=dict( - type='list', - elements='dict', - options=dict( - name=dict(type='str', required=True), - access_level=dict(type='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer'], required=True), - ) - ), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['gitlab_user', 'gitlab_users_access'], - ['access_level', 'gitlab_users_access'], - ], - required_together=[ - ['api_username', 'api_password'], - ['gitlab_user', 'access_level'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], - ['gitlab_user', 'gitlab_users_access'], - ], - required_if=[ - ['state', 'present', ['access_level', 'gitlab_users_access'], True], - ], - supports_check_mode=True, - ) - - if not HAS_PY_GITLAB: - module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) - - access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - } - - gitlab_project = module.params['project'] - state = module.params['state'] - access_level = module.params['access_level'] - purge_users = module.params['purge_users'] - - if purge_users: - purge_users = [access_level_int[level] for level in purge_users] - - # connect to gitlab server - gl = gitlab_authentication(module) - - project = GitLabProjectMembers(module, gl) - - gitlab_project_id = 
project.get_project(gitlab_project) - - # project doesn't exist - if not gitlab_project_id: - module.fail_json(msg="project '%s' not found." % gitlab_project) - - members = [] - if module.params['gitlab_user'] is not None: - gitlab_users_access = [] - gitlab_users = module.params['gitlab_user'] - for gl_user in gitlab_users: - gitlab_users_access.append( - {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) - elif module.params['gitlab_users_access'] is not None: - gitlab_users_access = module.params['gitlab_users_access'] - for user_level in gitlab_users_access: - user_level['access_level'] = access_level_int[user_level['access_level']] - - if len(gitlab_users_access) == 1 and not purge_users: - # only single user given - members = [project.get_member_in_a_project( - gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))] - if members[0] is None: - members = [] - elif len(gitlab_users_access) > 1 or purge_users: - # list of users given - members = project.get_members_in_a_project(gitlab_project_id) - else: - module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", - result_data=[]) - - changed = False - error = False - changed_users = [] - changed_data = [] - - for gitlab_user in gitlab_users_access: - gitlab_user_id = project.get_user_id(gitlab_user['name']) - - # user doesn't exist - if not gitlab_user_id: - if state == 'absent': - changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']}) - else: - error = True - changed_users.append("user '%s' not found." % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "user '%s' not found." % gitlab_user['name']}) - continue - - is_user_a_member = project.is_user_a_member(members, gitlab_user_id) - - # check if the user is a member in the project - if not is_user_a_member: - if state == 'present': - # add user to the project - try: - if not module.check_mode: - project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level']) - changed = True - changed_users.append("Successfully added user '%s' to project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully added user '%s' to project" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabCreateError) as e: - error = True - changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)}) - # state as absent - else: - changed_users.append("User, '%s', is not a member in the project. No change to report" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "User, '%s', is not a member in the project. No change to report" % gitlab_user['name']}) - # in case that a user is a member - else: - if state == 'present': - # compare the access level - user_access_level = project.get_user_access_level(members, gitlab_user_id) - if user_access_level == gitlab_user['access_level']: - changed_users.append("User, '%s', is already a member in the project. 
No change to report" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': "User, '%s', is already a member in the project. No change to report" % gitlab_user['name']}) - else: - # update the access level for the user - try: - if not module.check_mode: - project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) - changed = True - changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabUpdateError) as e: - error = True - changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)}) - else: - # remove the user from the project - try: - if not module.check_mode: - project.remove_user_from_project(gitlab_user_id, gitlab_project_id) - changed = True - changed_users.append("Successfully removed user, '%s', from the project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': "Successfully removed user, '%s', from the project" % gitlab_user['name']}) - except (gitlab.exceptions.GitlabDeleteError) as e: - error = True - changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)}) - - # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users - if state == 'present' and purge_users: - uppercase_names_in_gitlab_users_access = [] - for name in gitlab_users_access: - uppercase_names_in_gitlab_users_access.append(name['name'].upper()) - - for member in members: - if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access: - try: - if not module.check_mode: - project.remove_user_from_project(member.id, gitlab_project_id) - changed = True - changed_users.append("Successfully removed user '%s', from project. Was not in given list" % member.username) - changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', - 'msg': "Successfully removed user '%s', from project. 
Was not in given list" % member.username}) - except (gitlab.exceptions.GitlabDeleteError) as e: - error = True - changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name']) - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)}) - - if len(gitlab_users_access) == 1 and error: - # if single user given and an error occurred return error for list errors will be per user - module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data) - elif error: - module.fail_json( - msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) - - module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py deleted file mode 100644 index f9b8d7b6..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py +++ /dev/null @@ -1,479 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: gitlab_project_variable -short_description: Creates/updates/deletes GitLab Projects Variables -description: - - When a project variable does not exist, it will be created. - - When a project variable does exist, its value will be updated when the values are different. - - Variables which are untouched in the playbook, but are not untouched in the GitLab project, - they stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)). -author: - - "Markus Bergholz (@markuman)" -requirements: - - python >= 2.7 - - python-gitlab python module -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - state: - description: - - Create or delete project variable. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - project: - description: - - The path and name of the project. - required: true - type: str - purge: - description: - - When set to true, all variables which are not untouched in the task will be deleted. - default: false - type: bool - vars: - description: - - When the list element is a simple key-value pair, masked and protected will be set to false. - - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can - have full control about whether a value should be masked, protected or both. - - Support for protected values requires GitLab >= 9.3. - - Support for masked values requires GitLab >= 11.10. - - Support for environment_scope requires GitLab Premium >= 13.11. - - Support for variable_type requires GitLab >= 11.11. - - A I(value) must be a string or a number. - - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file). - - Field I(environment_scope) must be a string defined by scope environment. 
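Both accepted shapes collapse into the same internal list structure. A condensed Python 3 sketch of the transformation vars_to_variables() performs further down:

```python
def normalize(name, value):
    # Simple scalars become an unmasked, unprotected env_var entry.
    if isinstance(value, (str, int, float)):
        return {'name': name, 'value': str(value), 'masked': False,
                'protected': False, 'variable_type': 'env_var'}
    # Dicts pass masked/protected/variable_type through explicitly.
    if isinstance(value, dict):
        item = {'name': name, 'value': value.get('value'),
                'masked': value.get('masked'), 'protected': value.get('protected'),
                'variable_type': value.get('variable_type')}
        if value.get('environment_scope'):
            item['environment_scope'] = value['environment_scope']
        return item
    raise TypeError('value must be a string, number or dict')

print(normalize('ACCESS_KEY_ID', 'abc123'))
print(normalize('SECRET_ACCESS_KEY', {'value': '3214cbad', 'masked': True}))
```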
- - When a value is masked, it must be in Base64 and have a length of at least 8 characters. - See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). - default: {} - type: dict - variables: - version_added: 4.4.0 - description: - - A list of dictionaries that represents CI/CD variables. - - This module works internal with this structure, even if the older I(vars) parameter is used. - default: [] - type: list - elements: dict - suboptions: - name: - description: - - The name of the variable. - type: str - required: true - value: - description: - - The variable value. - - Required when I(state=present). - type: str - masked: - description: - - Wether variable value is masked or not. - - Support for masked values requires GitLab >= 11.10. - type: bool - default: false - protected: - description: - - Wether variable value is protected or not. - - Support for protected values requires GitLab >= 9.3. - type: bool - default: false - variable_type: - description: - - Wether a variable is an environment variable (C(env_var)) or a file (C(file)). - - Support for I(variable_type) requires GitLab >= 11.11. - type: str - choices: ["env_var", "file"] - default: env_var - environment_scope: - description: - - The scope for the variable. - - Support for I(environment_scope) requires GitLab Premium >= 13.11. - type: str - default: '*' -''' - - -EXAMPLES = ''' -- name: Set or update some CI/CD variables - community.general.gitlab_project_variable: - api_url: https://gitlab.com - api_token: secret_access_token - project: markuman/dotfiles - purge: false - variables: - - name: ACCESS_KEY_ID - value: abc123 - - name: SECRET_ACCESS_KEY - value: dassgrfaeui8989 - masked: yes - protected: yes - environment_scope: production - -- name: Set or update some CI/CD variables - community.general.gitlab_project_variable: - api_url: https://gitlab.com - api_token: secret_access_token - project: markuman/dotfiles - purge: false - vars: - ACCESS_KEY_ID: abc123 - SECRET_ACCESS_KEY: - value: 3214cbad - masked: true - protected: true - variable_type: env_var - environment_scope: '*' - -- name: Delete one variable - community.general.gitlab_project_variable: - api_url: https://gitlab.com - api_token: secret_access_token - project: markuman/dotfiles - state: absent - vars: - ACCESS_KEY_ID: abc123 -''' - -RETURN = ''' -project_variable: - description: Four lists of the variablenames which were added, updated, removed or exist. - returned: always - type: dict - contains: - added: - description: A list of variables which were created. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - untouched: - description: A list of variables which exist. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - removed: - description: A list of variables which were deleted. - returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" - updated: - description: A list of variables whose values were changed. 
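Taken together, the return document amounts to four lists of variable names, and the module reports C(changed) whenever added, updated or removed is non-empty. A hypothetical run might return:

```python
# Shape of the project_variable return value (hypothetical run):
result = {
    'added': ['ACCESS_KEY_ID'],
    'updated': [],
    'removed': ['OLD_TOKEN'],
    'untouched': ['SECRET_ACCESS_KEY'],
}
assert any(result[k] for k in ('added', 'updated', 'removed'))  # -> changed=True
```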
- returned: always - type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication - - -def vars_to_variables(vars, module): - # transform old vars to new variables structure - variables = list() - for item, value in vars.items(): - if (isinstance(value, string_types) or - isinstance(value, (integer_types, float))): - variables.append( - { - "name": item, - "value": str(value), - "masked": False, - "protected": False, - "variable_type": "env_var", - } - ) - - elif isinstance(value, dict): - - new_item = { - "name": item, - "value": value.get('value'), - "masked": value.get('masked'), - "protected": value.get('protected'), - "variable_type": value.get('variable_type'), - } - - if value.get('environment_scope'): - new_item['environment_scope'] = value.get('environment_scope') - - variables.append(new_item) - - else: - module.fail_json(msg="value must be of type string, integer, float or dict") - - return variables - - -class GitlabProjectVariables(object): - - def __init__(self, module, gitlab_instance): - self.repo = gitlab_instance - self.project = self.get_project(module.params['project']) - self._module = module - - def get_project(self, project_name): - return self.repo.projects.get(project_name) - - def list_all_project_variables(self): - page_nb = 1 - variables = [] - vars_page = self.project.variables.list(page=page_nb) - while len(vars_page) > 0: - variables += vars_page - page_nb += 1 - vars_page = self.project.variables.list(page=page_nb) - return variables - - def create_variable(self, var_obj): - if self._module.check_mode: - return True - - var = { - "key": var_obj.get('key'), "value": var_obj.get('value'), - "masked": var_obj.get('masked'), "protected": var_obj.get('protected'), - "variable_type": var_obj.get('variable_type') - } - - if var_obj.get('environment_scope') is not None: - var["environment_scope"] = var_obj.get('environment_scope') - - self.project.variables.create(var) - return True - - def update_variable(self, var_obj): - if self._module.check_mode: - return True - self.delete_variable(var_obj) - self.create_variable(var_obj) - return True - - def delete_variable(self, var_obj): - if self._module.check_mode: - return True - self.project.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')}) - return True - - -def compare(requested_variables, existing_variables, state): - # we need to do this, because it was determined in a previous version - more or less buggy - # basically it is not necessary and might results in more/other bugs! - # but it is required and only relevant for check mode!! - # logic represents state 'present' when not purge. 
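A few steps up, list_all_project_variables() pages through the API by hand rather than relying on client-side pagination helpers. The same loop in isolation, against a hypothetical stand-in for project.variables:

```python
def list_all(variables_api):
    # Fetch page after page until an empty page marks the end.
    page, results = 1, []
    batch = variables_api.list(page=page)
    while batch:
        results += batch
        page += 1
        batch = variables_api.list(page=page)
    return results

class FakeVars(object):
    # Hypothetical stand-in for project.variables.
    def __init__(self, pages):
        self._pages = pages

    def list(self, page):
        return self._pages[page - 1] if page <= len(self._pages) else []

print(list_all(FakeVars([['a', 'b'], ['c']])))  # -> ['a', 'b', 'c']
```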
all other can be derived from that - # untouched => equal in both - # updated => name and scope are equal - # added => name and scope does not exist - untouched = list() - updated = list() - added = list() - - if state == 'present': - existing_key_scope_vars = list() - for item in existing_variables: - existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')}) - - for var in requested_variables: - if var in existing_variables: - untouched.append(var) - else: - compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')} - if compare_item in existing_key_scope_vars: - updated.append(var) - else: - added.append(var) - - return untouched, updated, added - - -def native_python_main(this_gitlab, purge, requested_variables, state, module): - - change = False - return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) - - gitlab_keys = this_gitlab.list_all_project_variables() - before = [x.attributes for x in gitlab_keys] - - gitlab_keys = this_gitlab.list_all_project_variables() - existing_variables = [x.attributes for x in gitlab_keys] - - # preprocessing:filter out and enrich before compare - for item in existing_variables: - item.pop('project_id') - - for item in requested_variables: - item['key'] = item.pop('name') - item['value'] = str(item.get('value')) - if item.get('protected') is None: - item['protected'] = False - if item.get('masked') is None: - item['masked'] = False - if item.get('environment_scope') is None: - item['environment_scope'] = '*' - if item.get('variable_type') is None: - item['variable_type'] = 'env_var' - - if module.check_mode: - untouched, updated, added = compare(requested_variables, existing_variables, state) - - if state == 'present': - add_or_update = [x for x in requested_variables if x not in existing_variables] - for item in add_or_update: - try: - if this_gitlab.create_variable(item): - return_value['added'].append(item) - - except Exception: - if this_gitlab.update_variable(item): - return_value['updated'].append(item) - - if purge: - # refetch and filter - gitlab_keys = this_gitlab.list_all_project_variables() - existing_variables = [x.attributes for x in gitlab_keys] - for item in existing_variables: - item.pop('project_id') - - remove = [x for x in existing_variables if x not in requested_variables] - for item in remove: - if this_gitlab.delete_variable(item): - return_value['removed'].append(item) - - elif state == 'absent': - # value does not matter on removing variables. 
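-            # (illustrative note, comment only: a requested entry such as
-            #  {'key': 'ACCESS_KEY_ID', 'environment_scope': '*'} is matched against
-            #  existing variables purely on key and scope, whatever value is stored)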
- # key and environment scope are sufficient - for item in existing_variables: - item.pop('value') - item.pop('variable_type') - for item in requested_variables: - item.pop('value') - item.pop('variable_type') - - if not purge: - remove_requested = [x for x in requested_variables if x in existing_variables] - for item in remove_requested: - if this_gitlab.delete_variable(item): - return_value['removed'].append(item) - - else: - for item in existing_variables: - if this_gitlab.delete_variable(item): - return_value['removed'].append(item) - - if module.check_mode: - return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) - - if return_value['added'] or return_value['removed'] or return_value['updated']: - change = True - - gitlab_keys = this_gitlab.list_all_project_variables() - after = [x.attributes for x in gitlab_keys] - - return change, return_value, before, after - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update( - project=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( - name=dict(type='str', required=True), - value=dict(type='str', no_log=True), - masked=dict(type='bool', default=False), - protected=dict(type='bool', default=False), - environment_scope=dict(type='str', default='*'), - variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) - )), - state=dict(type='str', default="present", choices=["absent", "present"]), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['vars', 'variables'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] - ], - supports_check_mode=True - ) - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - purge = module.params['purge'] - var_list = module.params['vars'] - state = module.params['state'] - - if var_list: - variables = vars_to_variables(var_list, module) - else: - variables = module.params['variables'] - - if state == 'present': - if any(x['value'] is None for x in variables): - module.fail_json(msg='value parameter is required in state present') - - gitlab_instance = gitlab_authentication(module) - - this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance) - - change, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) - - # postprocessing - for item in after: - item.pop('project_id') - item['name'] = item.pop('key') - for item in before: - item.pop('project_id') - item['name'] = item.pop('key') - - untouched_key_name = 'key' - if not module.check_mode: - untouched_key_name = 'name' - raw_return_value['untouched'] = [x for x in before if x in after] - - added = [x.get('key') for x in raw_return_value['added']] - updated = [x.get('key') for x in raw_return_value['updated']] - removed = [x.get('key') for x in raw_return_value['removed']] - untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] - return_value = 
dict(added=added, updated=updated, removed=removed, untouched=untouched)
-
-    module.exit_json(changed=change, project_variable=return_value)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_protected_branch.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_protected_branch.py
deleted file mode 100644
index fe8e98a3..00000000
--- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_protected_branch.py
+++ /dev/null
@@ -1,201 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: gitlab_protected_branch
-short_description: Protect or unprotect existing branches
-version_added: 3.4.0
-description:
-  - Protect or unprotect existing branches.
-author:
-  - "Werner Dijkerman (@dj-wasabi)"
-requirements:
-  - python >= 2.7
-  - python-gitlab >= 2.3.0
-extends_documentation_fragment:
-- community.general.auth_basic
-- community.general.gitlab
-
-options:
-  state:
-    description:
-      - Create or delete a protected branch.
-    default: present
-    type: str
-    choices: ["present", "absent"]
-  project:
-    description:
-      - The path and name of the project.
-    required: true
-    type: str
-  name:
-    description:
-      - The name of the branch that needs to be protected.
-      - Can use a wildcard character, for example C(production/*), or a plain branch name such as C(main) or C(develop).
-    required: true
-    type: str
-  merge_access_levels:
-    description:
-      - Access levels allowed to merge.
-    default: maintainer
-    type: str
-    choices: ["maintainer", "developer", "nobody"]
-  push_access_level:
-    description:
-      - Access levels allowed to push.
- default: maintainer - type: str - choices: ["maintainer", "developer", "nobody"] -''' - - -EXAMPLES = ''' -- name: Create protected branch on main - community.general.gitlab_protected_branch: - api_url: https://gitlab.com - api_token: secret_access_token - project: "dj-wasabi/collection.general" - name: main - merge_access_levels: maintainer - push_access_level: nobody - -''' - -RETURN = ''' -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.api import basic_auth_argument_spec - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication - - -class GitlabProtectedBranch(object): - - def __init__(self, module, project, gitlab_instance): - self.repo = gitlab_instance - self._module = module - self.project = self.get_project(project) - self.ACCESS_LEVEL = { - 'nobody': gitlab.NO_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS - } - - def get_project(self, project_name): - return self.repo.projects.get(project_name) - - def protected_branch_exist(self, name): - try: - return self.project.protectedbranches.get(name) - except Exception as e: - return False - - def create_protected_branch(self, name, merge_access_levels, push_access_level): - if self._module.check_mode: - return True - merge = self.ACCESS_LEVEL[merge_access_levels] - push = self.ACCESS_LEVEL[push_access_level] - self.project.protectedbranches.create({ - 'name': name, - 'merge_access_level': merge, - 'push_access_level': push - }) - - def compare_protected_branch(self, name, merge_access_levels, push_access_level): - configured_merge = self.ACCESS_LEVEL[merge_access_levels] - configured_push = self.ACCESS_LEVEL[push_access_level] - current = self.protected_branch_exist(name=name) - current_merge = current.merge_access_levels[0]['access_level'] - current_push = current.push_access_levels[0]['access_level'] - if current: - if current.name == name and current_merge == configured_merge and current_push == configured_push: - return True - return False - - def delete_protected_branch(self, name): - if self._module.check_mode: - return True - return self.project.protectedbranches.delete(name) - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update( - project=dict(type='str', required=True), - name=dict(type='str', required=True), - merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), - push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), - state=dict(type='str', default="present", choices=["absent", "present"]), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] - ], - supports_check_mode=True - ) - - project = module.params['project'] - name = 
module.params['name']
-    merge_access_levels = module.params['merge_access_levels']
-    push_access_level = module.params['push_access_level']
-    state = module.params['state']
-
-    if not HAS_GITLAB_PACKAGE:
-        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
-    gitlab_version = gitlab.__version__
-    if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
-        module.fail_json(msg="community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
-                             " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
-
-    gitlab_instance = gitlab_authentication(module)
-    this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance)
-
-    p_branch = this_gitlab.protected_branch_exist(name=name)
-    if not p_branch and state == "present":
-        this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
-        module.exit_json(changed=True, msg="Created the protected branch.")
-    elif p_branch and state == "present":
-        if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level):
-            this_gitlab.delete_protected_branch(name=name)
-            this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
-            module.exit_json(changed=True, msg="Recreated the protected branch.")
-    elif p_branch and state == "absent":
-        this_gitlab.delete_protected_branch(name=name)
-        module.exit_json(changed=True, msg="Deleted the protected branch.")
-    module.exit_json(changed=False, msg="No changes are needed.")
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py
deleted file mode 100644
index c31030ab..00000000
--- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py
+++ /dev/null
@@ -1,415 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Raphaël Droz (raphael.droz@gmail.com)
-# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
-# Copyright: (c) 2018, Samy Coenen
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: gitlab_runner
-short_description: Create, modify and delete GitLab Runners
-description:
-  - Register, update and delete runners with the GitLab API.
-  - All operations are performed using the GitLab API v4.
-  - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
-  - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
-    U(https://$GITLAB_URL/profile/personal_access_tokens).
-  - A valid registration token is required for registering a new runner.
-    To create shared runners, you need to ask your administrator to give you this token.
-    It can be found at U(https://$GITLAB_URL/admin/runners/).
-notes:
-  - To create a new runner, at least the C(api_token), C(description) and C(api_url) options are required.
-  - Runners need to have unique descriptions.
-author: - - Samy Coenen (@SamyCoenen) - - Guillaume Martinez (@Lunik) -requirements: - - python >= 2.7 - - python-gitlab >= 1.5.0 -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - project: - description: - - ID or full path of the project in the form of group/name. - - Mutually exclusive with I(owned) since community.general 4.5.0. - type: str - version_added: '3.7.0' - description: - description: - - The unique name of the runner. - required: True - type: str - aliases: - - name - state: - description: - - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same name. - required: False - default: present - choices: ["present", "absent"] - type: str - registration_token: - description: - - The registration token is used to register new runners. - - Required if I(state) is C(present). - type: str - owned: - description: - - Searches only runners available to the user when searching for existing, when false admin token required. - - Mutually exclusive with I(project) since community.general 4.5.0. - default: no - type: bool - version_added: 2.0.0 - active: - description: - - Define if the runners is immediately active after creation. - required: False - default: yes - type: bool - locked: - description: - - Determines if the runner is locked or not. - required: False - default: False - type: bool - access_level: - description: - - Determines if a runner can pick up jobs only from protected branches. - - If set to C(ref_protected), runner can pick up jobs only from protected branches. - - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches. - required: False - default: ref_protected - choices: ["ref_protected", "not_protected"] - type: str - maximum_timeout: - description: - - The maximum time that a runner has to complete a specific job. - required: False - default: 3600 - type: int - run_untagged: - description: - - Run untagged jobs or not. - required: False - default: yes - type: bool - tag_list: - description: The tags that apply to the runner. 
- required: False - default: [] - type: list - elements: str -''' - -EXAMPLES = ''' -- name: "Register runner" - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - registration_token: 4gfdsg345 - description: Docker Machine t1 - state: present - active: True - tag_list: ['docker'] - run_untagged: False - locked: False - -- name: "Delete runner" - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - description: Docker Machine t1 - state: absent - -- name: Delete an owned runner as a non-admin - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - description: Docker Machine t1 - owned: yes - state: absent - -- name: Register runner for a specific project - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - registration_token: 4gfdsg345 - description: MyProject runner - state: present - project: mygroup/mysubgroup/myproject -''' - -RETURN = ''' -msg: - description: Success or failure message - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server - returned: always - type: dict - -error: - description: the error message returned by the GitLab API - returned: failed - type: str - sample: "400: path is already in use" - -runner: - description: API object - returned: always - type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication - -try: - cmp -except NameError: - def cmp(a, b): - return (a > b) - (a < b) - - -class GitLabRunner(object): - def __init__(self, module, gitlab_instance, project=None): - self._module = module - self._gitlab = gitlab_instance - # Whether to operate on GitLab-instance-wide or project-wide runners - # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 - # for group runner token access - if project: - self._runners_endpoint = project.runners.list - elif module.params['owned']: - self._runners_endpoint = gitlab_instance.runners.list - else: - self._runners_endpoint = gitlab_instance.runners.all - - self.runner_object = None - - def create_or_update_runner(self, description, options): - changed = False - - # Because we have already call userExists in main() - if self.runner_object is None: - runner = self.create_runner({ - 'description': description, - 'active': options['active'], - 'token': options['registration_token'], - 'locked': options['locked'], - 'run_untagged': options['run_untagged'], - 'maximum_timeout': options['maximum_timeout'], - 'tag_list': options['tag_list'], - }) - changed = True - else: - changed, runner = self.update_runner(self.runner_object, { - 'active': options['active'], - 'locked': options['locked'], - 'run_untagged': options['run_untagged'], - 'maximum_timeout': options['maximum_timeout'], - 'access_level': options['access_level'], - 'tag_list': options['tag_list'], - }) - - self.runner_object = runner - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, 
msg="Successfully created or updated the runner %s" % description) - - try: - runner.save() - except Exception as e: - self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) - return True - else: - return False - - ''' - @param arguments Attributes of the runner - ''' - def create_runner(self, arguments): - if self._module.check_mode: - return True - - try: - runner = self._gitlab.runners.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) - - return runner - - ''' - @param runner Runner object - @param arguments Attributes of the runner - ''' - def update_runner(self, runner, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if isinstance(arguments[arg_key], list): - list1 = getattr(runner, arg_key) - list1.sort() - list2 = arguments[arg_key] - list2.sort() - if cmp(list1, list2): - setattr(runner, arg_key, arguments[arg_key]) - changed = True - else: - if getattr(runner, arg_key) != arguments[arg_key]: - setattr(runner, arg_key, arguments[arg_key]) - changed = True - - return (changed, runner) - - ''' - @param description Description of the runner - ''' - def find_runner(self, description): - runners = self._runners_endpoint(as_list=False) - - for runner in runners: - # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner - # object, so we need to handle both - if hasattr(runner, "description"): - if (runner.description == description): - return self._gitlab.runners.get(runner.id) - else: - if (runner['description'] == description): - return self._gitlab.runners.get(runner['id']) - - ''' - @param description Description of the runner - ''' - def exists_runner(self, description): - # When runner exists, object will be stored in self.runner_object. 
- runner = self.find_runner(description) - - if runner: - self.runner_object = runner - return True - return False - - def delete_runner(self): - if self._module.check_mode: - return True - - runner = self.runner_object - - return runner.delete() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - description=dict(type='str', required=True, aliases=["name"]), - active=dict(type='bool', default=True), - owned=dict(type='bool', default=False), - tag_list=dict(type='list', elements='str', default=[]), - run_untagged=dict(type='bool', default=True), - locked=dict(type='bool', default=False), - access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), - maximum_timeout=dict(type='int', default=3600), - registration_token=dict(type='str', no_log=True), - project=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['project', 'owned'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], - ], - required_if=[ - ('state', 'present', ['registration_token']), - ], - supports_check_mode=True, - ) - - state = module.params['state'] - runner_description = module.params['description'] - runner_active = module.params['active'] - tag_list = module.params['tag_list'] - run_untagged = module.params['run_untagged'] - runner_locked = module.params['locked'] - access_level = module.params['access_level'] - maximum_timeout = module.params['maximum_timeout'] - registration_token = module.params['registration_token'] - project = module.params['project'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - gitlab_project = None - if project: - try: - gitlab_project = gitlab_instance.projects.get(project) - except gitlab.exceptions.GitlabGetError as e: - module.fail_json(msg='No such a project %s' % project, exception=to_native(e)) - - gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project) - runner_exists = gitlab_runner.exists_runner(runner_description) - - if state == 'absent': - if runner_exists: - gitlab_runner.delete_runner() - module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description) - else: - module.exit_json(changed=False, msg="Runner deleted or does not exists") - - if state == 'present': - if gitlab_runner.create_or_update_runner(runner_description, { - "active": runner_active, - "tag_list": tag_list, - "run_untagged": run_untagged, - "locked": runner_locked, - "access_level": access_level, - "maximum_timeout": maximum_timeout, - "registration_token": registration_token, - }): - module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs, - msg="Successfully created or updated the runner %s" % runner_description) - else: - module.exit_json(changed=False, runner=gitlab_runner.runner_object._attrs, - msg="No need to update the runner %s" % runner_description) - - -if __name__ == '__main__': - main() diff --git 
a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py b/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py deleted file mode 100644 index d56e553c..00000000 --- a/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py +++ /dev/null @@ -1,693 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Lennert Mertens (lennert@nubera.be) -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: gitlab_user -short_description: Creates/updates/deletes/blocks/unblocks GitLab Users -description: - - When the user does not exist in GitLab, it will be created. - - When the user exists and state=absent, the user will be deleted. - - When the user exists and state=blocked, the user will be blocked. - - When changes are made to user, the user will be updated. -notes: - - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user. -author: - - Werner Dijkerman (@dj-wasabi) - - Guillaume Martinez (@Lunik) - - Lennert Mertens (@LennertMertens) - - Stef Graces (@stgrace) -requirements: - - python >= 2.7 - - python-gitlab python module - - administrator rights on the GitLab server -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - name: - description: - - Name of the user you want to create. - - Required only if C(state) is set to C(present). - type: str - username: - description: - - The username of the user. - required: true - type: str - password: - description: - - The password of the user. - - GitLab server enforces minimum password length to 8, set this value with 8 or more characters. - - Required only if C(state) is set to C(present). - type: str - reset_password: - description: - - Whether the user can change its password or not. - default: false - type: bool - version_added: 3.3.0 - email: - description: - - The email that belongs to the user. - - Required only if C(state) is set to C(present). - type: str - sshkey_name: - description: - - The name of the SSH public key. - type: str - sshkey_file: - description: - - The SSH public key itself. - type: str - sshkey_expires_at: - description: - - The expiration date of the SSH public key in ISO 8601 format C(YYYY-MM-DDTHH:MM:SSZ). - - This is only used when adding new SSH public keys. - type: str - version_added: 3.1.0 - group: - description: - - Id or Full path of parent group in the form of group/name. - - Add user as a member to this group. - type: str - access_level: - description: - - The access level to the group. One of the following can be used. - - guest - - reporter - - developer - - master (alias for maintainer) - - maintainer - - owner - default: guest - type: str - choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"] - state: - description: - - Create, delete or block a user. - default: present - type: str - choices: ["present", "absent", "blocked", "unblocked"] - confirm: - description: - - Require confirmation. - type: bool - default: yes - isadmin: - description: - - Grant admin privileges to the user. 
- type: bool - default: no - external: - description: - - Define external parameter for this user. - type: bool - default: no - identities: - description: - - List of identities to be added/updated for this user. - - To remove all other identities from this user, set I(overwrite_identities=true). - type: list - elements: dict - suboptions: - provider: - description: - - The name of the external identity provider - type: str - extern_uid: - description: - - User ID for external identity. - type: str - version_added: 3.3.0 - overwrite_identities: - description: - - Overwrite identities with identities added in this module. - - This means that all identities that the user has and that are not listed in I(identities) are removed from the user. - - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list. - type: bool - default: false - version_added: 3.3.0 -''' - -EXAMPLES = ''' -- name: "Delete GitLab User" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - username: myusername - state: absent - -- name: "Create GitLab User" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - validate_certs: True - api_username: dj-wasabi - api_password: "MySecretPassword" - name: My Name - username: myusername - password: mysecretpassword - email: me@example.com - sshkey_name: MySSH - sshkey_file: ssh-rsa AAAAB3NzaC1yc... - state: present - group: super_group/mon_group - access_level: owner - -- name: "Create GitLab User using external identity provider" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - validate_certs: True - api_token: "{{ access_token }}" - name: My Name - username: myusername - password: mysecretpassword - email: me@example.com - identities: - - provider: Keycloak - extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc - state: present - group: super_group/mon_group - access_level: owner - -- name: "Block GitLab User" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - username: myusername - state: blocked - -- name: "Unblock GitLab User" - community.general.gitlab_user: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - validate_certs: False - username: myusername - state: unblocked -''' - -RETURN = ''' -msg: - description: Success or failure message - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server - returned: always - type: dict - -error: - description: the error message returned by the GitLab API - returned: failed - type: str - sample: "400: path is already in use" - -user: - description: API object - returned: always - type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_group, gitlab_authentication - - -class GitLabUser(object): - def __init__(self, module, gitlab_instance): - self._module = module - self._gitlab = gitlab_instance - self.user_object = None - self.ACCESS_LEVEL = { - 'guest': 
gitlab.GUEST_ACCESS,
-            'reporter': gitlab.REPORTER_ACCESS,
-            'developer': gitlab.DEVELOPER_ACCESS,
-            'master': gitlab.MAINTAINER_ACCESS,
-            'maintainer': gitlab.MAINTAINER_ACCESS,
-            'owner': gitlab.OWNER_ACCESS,
-        }
-
-    '''
-    @param username Username of the user
-    @param options User options
-    '''
-    def create_or_update_user(self, username, options):
-        changed = False
-        potentially_changed = False
-
-        # Because exists_user() has already been called in main()
-        if self.user_object is None:
-            user = self.create_user({
-                'name': options['name'],
-                'username': username,
-                'password': options['password'],
-                'reset_password': options['reset_password'],
-                'email': options['email'],
-                'skip_confirmation': not options['confirm'],
-                'admin': options['isadmin'],
-                'external': options['external'],
-                'identities': options['identities'],
-            })
-            changed = True
-        else:
-            changed, user = self.update_user(
-                self.user_object, {
-                    # add "normal" parameters here, put uncheckable
-                    # params in the dict below
-                    'name': {'value': options['name']},
-                    'email': {'value': options['email']},
-
-                    # note: for some attributes like this one the key
-                    # from reading back from server is unfortunately
-                    # different to the one needed for pushing/writing,
-                    # in that case use the optional setter key
-                    'is_admin': {
-                        'value': options['isadmin'], 'setter': 'admin'
-                    },
-                    'external': {'value': options['external']},
-                    'identities': {'value': options['identities']},
-                },
-                {
-                    # put "uncheckable" params here, this means params
-                    # which GitLab does accept for setting but does
-                    # not return any information about
-                    'skip_reconfirmation': {'value': not options['confirm']},
-                    'password': {'value': options['password']},
-                    'reset_password': {'value': options['reset_password']},
-                    'overwrite_identities': {'value': options['overwrite_identities']},
-                }
-            )
-
-            # note: as we unfortunately have some uncheckable parameters
-            # where it is not possible to determine if the update
-            # changed something or not, we must assume here that a
-            # change happened and that a user object update is needed
-            potentially_changed = True
-
-        # Assign ssh keys
-        if options['sshkey_name'] and options['sshkey_file']:
-            key_changed = self.add_ssh_key_to_user(user, {
-                'name': options['sshkey_name'],
-                'file': options['sshkey_file'],
-                'expires_at': options['sshkey_expires_at']})
-            changed = changed or key_changed
-
-        # Assign group
-        if options['group_path']:
-            group_changed = self.assign_user_to_group(user, options['group_path'], options['access_level'])
-            changed = changed or group_changed
-
-        self.user_object = user
-        if (changed or potentially_changed) and not self._module.check_mode:
-            try:
-                user.save()
-            except Exception as e:
-                self._module.fail_json(msg="Failed to update user: %s " % to_native(e))
-
-        if changed:
-            if self._module.check_mode:
-                self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username)
-            return True
-        else:
-            return False
-
-    '''
-    @param user User object
-    '''
-    def get_user_id(self, user):
-        if user is not None:
-            return user.id
-        return None
-
-    '''
-    @param user User object
-    @param sshkey_name Name of the ssh key
-    '''
-    def ssh_key_exists(self, user, sshkey_name):
-        keyList = map(lambda k: k.title, user.keys.list())
-
-        return sshkey_name in keyList
-
-    '''
-    @param user User object
-    @param sshkey Dict containing sshkey infos {"name": "", "file": "", "expires_at": ""}
-    '''
-    def add_ssh_key_to_user(self, user, sshkey):
-        if not self.ssh_key_exists(user, sshkey['name']):
-            if
self._module.check_mode: - return True - - try: - parameter = { - 'title': sshkey['name'], - 'key': sshkey['file'], - } - if sshkey['expires_at'] is not None: - parameter['expires_at'] = sshkey['expires_at'] - user.keys.create(parameter) - except gitlab.exceptions.GitlabCreateError as e: - self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e)) - return True - return False - - ''' - @param group Group object - @param user_id Id of the user to find - ''' - def find_member(self, group, user_id): - try: - member = group.members.get(user_id) - except gitlab.exceptions.GitlabGetError: - return None - return member - - ''' - @param group Group object - @param user_id Id of the user to check - ''' - def member_exists(self, group, user_id): - member = self.find_member(group, user_id) - - return member is not None - - ''' - @param group Group object - @param user_id Id of the user to check - @param access_level GitLab access_level to check - ''' - def member_as_good_access_level(self, group, user_id, access_level): - member = self.find_member(group, user_id) - - return member.access_level == access_level - - ''' - @param user User object - @param group_path Complete path of the Group including parent group path. / - @param access_level GitLab access_level to assign - ''' - def assign_user_to_group(self, user, group_identifier, access_level): - group = find_group(self._gitlab, group_identifier) - - if self._module.check_mode: - return True - - if group is None: - return False - - if self.member_exists(group, self.get_user_id(user)): - member = self.find_member(group, self.get_user_id(user)) - if not self.member_as_good_access_level(group, member.id, self.ACCESS_LEVEL[access_level]): - member.access_level = self.ACCESS_LEVEL[access_level] - member.save() - return True - else: - try: - group.members.create({ - 'user_id': self.get_user_id(user), - 'access_level': self.ACCESS_LEVEL[access_level]}) - except gitlab.exceptions.GitlabCreateError as e: - self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e)) - return True - return False - - ''' - @param user User object - @param arguments User attributes - ''' - def update_user(self, user, arguments, uncheckable_args): - changed = False - - for arg_key, arg_value in arguments.items(): - av = arg_value['value'] - - if av is not None: - if arg_key == "identities": - changed = self.add_identities(user, av, uncheckable_args['overwrite_identities']['value']) - - elif getattr(user, arg_key) != av: - setattr(user, arg_value.get('setter', arg_key), av) - changed = True - - for arg_key, arg_value in uncheckable_args.items(): - av = arg_value['value'] - - if av is not None: - setattr(user, arg_value.get('setter', arg_key), av) - - return (changed, user) - - ''' - @param arguments User attributes - ''' - def create_user(self, arguments): - if self._module.check_mode: - return True - - identities = None - if 'identities' in arguments: - identities = arguments['identities'] - del arguments['identities'] - - try: - user = self._gitlab.users.create(arguments) - if identities: - self.add_identities(user, identities) - - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json(msg="Failed to create user: %s " % to_native(e)) - - return user - - ''' - @param user User object - @param identites List of identities to be added/updated - @param overwrite_identities Overwrite user identities with identities passed to this module - ''' - def add_identities(self, user, identities, overwrite_identities=False): - 
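-        # (illustrative note, comment only: with overwrite_identities=true and
-        #  identities=[{'provider': 'Keycloak', 'extern_uid': 'abc'}], every identity
-        #  the user currently has for another provider is removed first via
-        #  delete_identities(), then the listed identity is added if missing)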
changed = False - if overwrite_identities: - changed = self.delete_identities(user, identities) - - for identity in identities: - if identity not in user.identities: - setattr(user, 'provider', identity['provider']) - setattr(user, 'extern_uid', identity['extern_uid']) - if not self._module.check_mode: - user.save() - changed = True - return changed - - ''' - @param user User object - @param identites List of identities to be added/updated - ''' - def delete_identities(self, user, identities): - changed = False - for identity in user.identities: - if identity not in identities: - if not self._module.check_mode: - user.identityproviders.delete(identity['provider']) - changed = True - return changed - - ''' - @param username Username of the user - ''' - def find_user(self, username): - users = self._gitlab.users.list(search=username) - for user in users: - if (user.username == username): - return user - - ''' - @param username Username of the user - ''' - def exists_user(self, username): - # When user exists, object will be stored in self.user_object. - user = self.find_user(username) - if user: - self.user_object = user - return True - return False - - ''' - @param username Username of the user - ''' - def is_active(self, username): - user = self.find_user(username) - return user.attributes['state'] == 'active' - - def delete_user(self): - if self._module.check_mode: - return True - - user = self.user_object - - return user.delete() - - def block_user(self): - if self._module.check_mode: - return True - - user = self.user_object - - return user.block() - - def unblock_user(self): - if self._module.check_mode: - return True - - user = self.user_object - - return user.unblock() - - -def sanitize_arguments(arguments): - for key, value in list(arguments.items()): - if value is None: - del arguments[key] - return arguments - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - name=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]), - username=dict(type='str', required=True), - password=dict(type='str', no_log=True), - reset_password=dict(type='bool', default=False, no_log=False), - email=dict(type='str'), - sshkey_name=dict(type='str'), - sshkey_file=dict(type='str', no_log=False), - sshkey_expires_at=dict(type='str', no_log=False), - group=dict(type='str'), - access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), - confirm=dict(type='bool', default=True), - isadmin=dict(type='bool', default=False), - external=dict(type='bool', default=False), - identities=dict(type='list', elements='dict'), - overwrite_identities=dict(type='bool', default=False), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] - ], - supports_check_mode=True, - required_if=( - ('state', 'present', ['name', 'email']), - ) - ) - - user_name = module.params['name'] - state = module.params['state'] - user_username = module.params['username'].lower() - user_password = module.params['password'] - user_reset_password = module.params['reset_password'] - 
user_email = module.params['email']
-    user_sshkey_name = module.params['sshkey_name']
-    user_sshkey_file = module.params['sshkey_file']
-    user_sshkey_expires_at = module.params['sshkey_expires_at']
-    group_path = module.params['group']
-    access_level = module.params['access_level']
-    confirm = module.params['confirm']
-    user_isadmin = module.params['isadmin']
-    user_external = module.params['external']
-    user_identities = module.params['identities']
-    overwrite_identities = module.params['overwrite_identities']
-
-    if not HAS_GITLAB_PACKAGE:
-        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
-    gitlab_instance = gitlab_authentication(module)
-
-    gitlab_user = GitLabUser(module, gitlab_instance)
-    user_exists = gitlab_user.exists_user(user_username)
-    if user_exists:
-        user_is_active = gitlab_user.is_active(user_username)
-    else:
-        user_is_active = False
-
-    if state == 'absent':
-        if user_exists:
-            gitlab_user.delete_user()
-            module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username)
-        else:
-            module.exit_json(changed=False, msg="User already deleted or does not exist")
-
-    if state == 'blocked':
-        if user_exists and user_is_active:
-            gitlab_user.block_user()
-            module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username)
-        else:
-            module.exit_json(changed=False, msg="User already blocked or does not exist")
-
-    if state == 'unblocked':
-        if user_exists and not user_is_active:
-            gitlab_user.unblock_user()
-            module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username)
-        else:
-            module.exit_json(changed=False, msg="User is not blocked or does not exist")
-
-    if state == 'present':
-        if gitlab_user.create_or_update_user(user_username, {
-            "name": user_name,
-            "password": user_password,
-            "reset_password": user_reset_password,
-            "email": user_email,
-            "sshkey_name": user_sshkey_name,
-            "sshkey_file": user_sshkey_file,
-            "sshkey_expires_at": user_sshkey_expires_at,
-            "group_path": group_path,
-            "access_level": access_level,
-            "confirm": confirm,
-            "isadmin": user_isadmin,
-            "external": user_external,
-            "identities": user_identities,
-            "overwrite_identities": overwrite_identities,
-        }):
-            module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.user_object._attrs)
-        else:
-            module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.user_object._attrs)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/source_control/hg.py b/ansible_collections/community/general/plugins/modules/source_control/hg.py
deleted file mode 100644
index 572b036e..00000000
--- a/ansible_collections/community/general/plugins/modules/source_control/hg.py
+++ /dev/null
@@ -1,295 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2013, Yeukhon Wong
-# Copyright: (c) 2014, Nate Coraor
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: hg
-short_description: Manages Mercurial (hg) repositories
-description:
-    - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address.
-author: "Yeukhon Wong (@yeukhon)"
-options:
-    repo:
-        description:
-            - The repository address.
-        required: yes
-        aliases: [ name ]
-        type: str
-    dest:
-        description:
-            - Absolute path of where the repository should be cloned to.
-              This parameter is required, unless I(clone) and I(update) are set to C(no).
-        type: path
-    revision:
-        description:
-            - Equivalent to the C(-r) option of the hg command, which could be the changeset, revision number,
-              branch name or even tag.
-        aliases: [ version ]
-        type: str
-    force:
-        description:
-            - Discards uncommitted changes. Runs C(hg update -C). Prior to
-              Ansible 1.9, the default was C(yes).
-        type: bool
-        default: 'no'
-    purge:
-        description:
-            - Deletes untracked files. Runs C(hg purge).
-        type: bool
-        default: 'no'
-    update:
-        description:
-            - If C(no), do not retrieve new revisions from the origin repository.
-        type: bool
-        default: 'yes'
-    clone:
-        description:
-            - If C(no), do not clone the repository if it does not exist locally.
-        type: bool
-        default: 'yes'
-    executable:
-        description:
-            - Path to hg executable to use. If not supplied,
-              the normal mechanism for resolving binary paths will be used.
-        type: str
-notes:
-    - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
-    - "If the task seems to be hanging, first verify that the remote host is in C(known_hosts).
-      SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
-      one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
-      the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
-    - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such,
-      if the underlying system still uses a Python version below 2.7.9, you will have issues checking out
-      bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
-'''
-
-EXAMPLES = '''
-- name: Ensure the current working copy is inside the stable branch and delete untracked files if any
-  community.general.hg:
-    repo: https://bitbucket.org/user/repo1
-    dest: /home/user/repo1
-    revision: stable
-    purge: yes
-
-- name: Get information about the repository whether or not it has already been cloned locally
-  community.general.hg:
-    repo: git://bitbucket.org/user/repo
-    dest: /srv/checkout
-    clone: no
-    update: no
-'''
-
-import os
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-
-class Hg(object):
-    def __init__(self, module, dest, repo, revision, hg_path):
-        self.module = module
-        self.dest = dest
-        self.repo = repo
-        self.revision = revision
-        self.hg_path = hg_path
-
-    def _command(self, args_list):
-        (rc, out, err) = self.module.run_command([self.hg_path] + args_list)
-        return (rc, out, err)
-
-    def _list_untracked(self):
-        args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
-        return self._command(args)
-
-    def get_revision(self):
-        """
-        hg id -b -i -t returns a string in the format:
-           "<changeset>[+] <branch_name> <tags>"
-        This format lists the state of the current working copy,
-        and indicates whether there are uncommitted changes by the
-        plus sign. Otherwise, the sign is omitted.
- - Read the full description via hg id --help - """ - (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest]) - if rc != 0: - self.module.fail_json(msg=err) - else: - return to_native(out).strip('\n') - - def get_remote_revision(self): - (rc, out, err) = self._command(['id', self.repo]) - if rc != 0: - self.module.fail_json(msg=err) - else: - return to_native(out).strip('\n') - - def has_local_mods(self): - now = self.get_revision() - if '+' in now: - return True - else: - return False - - def discard(self): - before = self.has_local_mods() - if not before: - return False - - args = ['update', '-C', '-R', self.dest, '-r', '.'] - (rc, out, err) = self._command(args) - if rc != 0: - self.module.fail_json(msg=err) - - after = self.has_local_mods() - if before != after and not after: # no more local modification - return True - - def purge(self): - # before purge, find out if there are any untracked files - (rc1, out1, err1) = self._list_untracked() - if rc1 != 0: - self.module.fail_json(msg=err1) - - # there are some untrackd files - if out1 != '': - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] - (rc2, out2, err2) = self._command(args) - if rc2 != 0: - self.module.fail_json(msg=err2) - return True - else: - return False - - def cleanup(self, force, purge): - discarded = False - purged = False - - if force: - discarded = self.discard() - if purge: - purged = self.purge() - if discarded or purged: - return True - else: - return False - - def pull(self): - return self._command( - ['pull', '-R', self.dest, self.repo]) - - def update(self): - if self.revision is not None: - return self._command(['update', '-r', self.revision, '-R', self.dest]) - return self._command(['update', '-R', self.dest]) - - def clone(self): - if self.revision is not None: - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) - return self._command(['clone', self.repo, self.dest]) - - @property - def at_revision(self): - """ - There is no point in pulling from a potentially down/slow remote site - if the desired changeset is already the current changeset. 
- """ - if self.revision is None or len(self.revision) < 7: - # Assume it's a rev number, tag, or branch - return False - (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) - if rc != 0: - self.module.fail_json(msg=err) - if out.startswith(self.revision): - return True - return False - - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec=dict( - repo=dict(type='str', required=True, aliases=['name']), - dest=dict(type='path'), - revision=dict(type='str', default=None, aliases=['version']), - force=dict(type='bool', default=False), - purge=dict(type='bool', default=False), - update=dict(type='bool', default=True), - clone=dict(type='bool', default=True), - executable=dict(type='str', default=None), - ), - ) - repo = module.params['repo'] - dest = module.params['dest'] - revision = module.params['revision'] - force = module.params['force'] - purge = module.params['purge'] - update = module.params['update'] - clone = module.params['clone'] - hg_path = module.params['executable'] or module.get_bin_path('hg', True) - if dest is not None: - hgrc = os.path.join(dest, '.hg/hgrc') - - # initial states - before = '' - changed = False - cleaned = False - - if not dest and (clone or update): - module.fail_json(msg="the destination directory must be specified unless clone=no and update=no") - - hg = Hg(module, dest, repo, revision, hg_path) - - # If there is no hgrc file, then assume repo is absent - # and perform clone. Otherwise, perform pull and update. - if not clone and not update: - out = hg.get_remote_revision() - module.exit_json(after=out, changed=False) - if not os.path.exists(hgrc): - if clone: - (rc, out, err) = hg.clone() - if rc != 0: - module.fail_json(msg=err) - else: - module.exit_json(changed=False) - elif not update: - # Just return having found a repo already in the dest path - before = hg.get_revision() - elif hg.at_revision: - # no update needed, don't pull - before = hg.get_revision() - - # but force and purge if desired - cleaned = hg.cleanup(force, purge) - else: - # get the current state before doing pulling - before = hg.get_revision() - - # can perform force and purge - cleaned = hg.cleanup(force, purge) - - (rc, out, err) = hg.pull() - if rc != 0: - module.fail_json(msg=err) - - (rc, out, err) = hg.update() - if rc != 0: - module.fail_json(msg=err) - - after = hg.get_revision() - if before != after or cleaned: - changed = True - - module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/spectrum_device.py b/ansible_collections/community/general/plugins/modules/spectrum_device.py deleted file mode 120000 index 16551f4b..00000000 --- a/ansible_collections/community/general/plugins/modules/spectrum_device.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/spectrum_device.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py b/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py deleted file mode 120000 index 0156a86a..00000000 --- a/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/spectrum_model_attrs.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py b/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py deleted file mode 
120000 index 417ab70c..00000000 --- a/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py +++ /dev/null @@ -1 +0,0 @@ -cloud/spotinst/spotinst_aws_elastigroup.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py b/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py deleted file mode 120000 index d65d498c..00000000 --- a/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py +++ /dev/null @@ -1 +0,0 @@ -storage/hpe3par/ss_3par_cpg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/ssh_config.py b/ansible_collections/community/general/plugins/modules/ssh_config.py deleted file mode 120000 index ea4f374f..00000000 --- a/ansible_collections/community/general/plugins/modules/ssh_config.py +++ /dev/null @@ -1 +0,0 @@ -system/ssh_config.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/stackdriver.py b/ansible_collections/community/general/plugins/modules/stackdriver.py deleted file mode 120000 index 291b82ca..00000000 --- a/ansible_collections/community/general/plugins/modules/stackdriver.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/stackdriver.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/stacki_host.py b/ansible_collections/community/general/plugins/modules/stacki_host.py deleted file mode 120000 index d80c8103..00000000 --- a/ansible_collections/community/general/plugins/modules/stacki_host.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/stacki/stacki_host.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/statsd.py b/ansible_collections/community/general/plugins/modules/statsd.py deleted file mode 120000 index a906f4df..00000000 --- a/ansible_collections/community/general/plugins/modules/statsd.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/statsd.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/statusio_maintenance.py b/ansible_collections/community/general/plugins/modules/statusio_maintenance.py deleted file mode 120000 index 327e5735..00000000 --- a/ansible_collections/community/general/plugins/modules/statusio_maintenance.py +++ /dev/null @@ -1 +0,0 @@ -monitoring/statusio_maintenance.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py b/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py deleted file mode 100644 index 20977687..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: emc_vnx_sg_member - -short_description: Manage storage group member on EMC VNX - - -description: - - "This module manages the members of an existing storage group." - -extends_documentation_fragment: -- community.general.emc.emc_vnx - - -options: - name: - description: - - Name of the Storage group to manage. - required: true - type: str - lunid: - description: - - Lun id to be added. 
- required: true - type: int - state: - description: - - Indicates the desired lunid state. - - C(present) ensures specified lunid is present in the Storage Group. - - C(absent) ensures specified lunid is absent from Storage Group. - default: present - choices: [ "present", "absent"] - type: str - - -author: - - Luca 'remix_tj' Lorenzetto (@remixtj) -''' - -EXAMPLES = ''' -- name: Add lun to storage group - community.general.emc_vnx_sg_member: - name: sg01 - sp_address: sp1a.fqdn - sp_user: sysadmin - sp_password: sysadmin - lunid: 100 - state: present - -- name: Remove lun from storage group - community.general.emc_vnx_sg_member: - name: sg01 - sp_address: sp1a.fqdn - sp_user: sysadmin - sp_password: sysadmin - lunid: 100 - state: absent -''' - -RETURN = ''' -hluid: - description: LUNID that hosts attached to the storage group will see. - type: int - returned: success -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec - -LIB_IMP_ERR = None -try: - from storops import VNXSystem - from storops.exception import VNXCredentialError, VNXStorageGroupError, \ - VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError - HAS_LIB = True -except Exception: - LIB_IMP_ERR = traceback.format_exc() - HAS_LIB = False - - -def run_module(): - module_args = dict( - name=dict(type='str', required=True), - lunid=dict(type='int', required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - - module_args.update(emc_vnx_argument_spec) - - result = dict( - changed=False, - hluid=None - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - - if not HAS_LIB: - module.fail_json(msg=missing_required_lib('storops >= 0.5.10'), - exception=LIB_IMP_ERR) - - sp_user = module.params['sp_user'] - sp_address = module.params['sp_address'] - sp_password = module.params['sp_password'] - alu = module.params['lunid'] - - # if the user is working with this module in only check mode we do not - # want to make any changes to the environment, just return the current - # state with no modifications - if module.check_mode: - return result - - try: - vnx = VNXSystem(sp_address, sp_user, sp_password) - sg = vnx.get_sg(module.params['name']) - if sg.existed: - if module.params['state'] == 'present': - if not sg.has_alu(alu): - try: - result['hluid'] = sg.attach_alu(alu) - result['changed'] = True - except VNXAluAlreadyAttachedError: - result['hluid'] = sg.get_hlu(alu) - except (VNXAttachAluError, VNXStorageGroupError) as e: - module.fail_json(msg='Error attaching {0}: ' - '{1} '.format(alu, to_native(e)), - **result) - else: - result['hluid'] = sg.get_hlu(alu) - if module.params['state'] == 'absent' and sg.has_alu(alu): - try: - sg.detach_alu(alu) - result['changed'] = True - except VNXDetachAluNotFoundError: - # being not attached when using absent is OK - pass - except VNXStorageGroupError as e: - module.fail_json(msg='Error detaching alu {0}: ' - '{1} '.format(alu, to_native(e)), - **result) - else: - module.fail_json(msg='No such storage group named ' - '{0}'.format(module.params['name']), - **result) - except VNXCredentialError as e: - module.fail_json(msg='{0}'.format(to_native(e)), **result) - - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git 
a/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py b/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py deleted file mode 100644 index be4a6a02..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -short_description: Manage HPE StoreServ 3PAR CPG -author: - - Farhan Nomani (@farhan7500) - - Gautham P Hegde (@gautamphegde) -description: - - Create and delete CPG on HPE 3PAR. -module: ss_3par_cpg -options: - cpg_name: - description: - - Name of the CPG. - type: str - required: true - disk_type: - choices: - - FC - - NL - - SSD - description: - - Specifies that physical disks must have the specified device type. - type: str - domain: - description: - - Specifies the name of the domain in which the object will reside. - type: str - growth_increment: - description: - - Specifies the growth increment (in MiB, GiB or TiB), the amount of logical disk storage - created on each auto-grow operation. - type: str - growth_limit: - description: - - Specifies that the autogrow operation is limited to the specified - storage amount that sets the growth limit (in MiB, GiB or TiB). - type: str - growth_warning: - description: - - Specifies the threshold (in MiB, GiB or TiB) of used logical disk space that, when exceeded, - results in a warning alert. - type: str - high_availability: - choices: - - PORT - - CAGE - - MAG - description: - - Specifies that the layout must support the failure of one port pair, - one cage, or one magazine. - type: str - raid_type: - choices: - - R0 - - R1 - - R5 - - R6 - description: - - Specifies the RAID type for the logical disk. - type: str - set_size: - description: - - Specifies the set size in the number of chunklets. - type: int - state: - choices: - - present - - absent - description: - - Whether the specified CPG should exist or not. - required: true - type: str - secure: - description: - - Specifies whether the certificate needs to be validated while communicating.
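The three growth_* options above all take strings like C(32000 MiB); create_cpg() further below normalizes them with hpe3par.convert_to_binary_multiple() before filling the WSAPI fields growthIncrementMiB, growthLimitMiB and usedLDWarningAlertMiB. A rough standalone equivalent of that normalization, written here from the documented "MiB, GiB or TiB" contract (the real helper may accept more spellings):

# Sketch: normalize "<number> MiB|GiB|TiB" strings to integer MiB.
def to_mib(size_str):
    value, unit = size_str.split()
    factor = {'MiB': 1, 'GiB': 1024, 'TiB': 1024 * 1024}[unit]
    return int(float(value) * factor)

assert to_mib('32000 MiB') == 32000
assert to_mib('64 GiB') == 65536
assert to_mib('2 TiB') == 2097152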
- type: bool - default: no -extends_documentation_fragment: -- community.general.hpe3par - -''' - - -EXAMPLES = r''' -- name: Create CPG sample_cpg - community.general.ss_3par_cpg: - storage_system_ip: 10.10.10.1 - storage_system_username: username - storage_system_password: password - state: present - cpg_name: sample_cpg - domain: sample_domain - growth_increment: 32000 MiB - growth_limit: 64000 MiB - growth_warning: 48000 MiB - raid_type: R6 - set_size: 8 - high_availability: MAG - disk_type: FC - secure: no - -- name: Delete CPG sample_cpg - community.general.ss_3par_cpg: - storage_system_ip: 10.10.10.1 - storage_system_username: username - storage_system_password: password - state: absent - cpg_name: sample_cpg - secure: no -''' - -RETURN = r''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par -try: - from hpe3par_sdk import client - from hpe3parclient import exceptions - HAS_3PARCLIENT = True -except ImportError: - HAS_3PARCLIENT = False - - -def validate_set_size(raid_type, set_size): - if raid_type: - set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes'] - if set_size in set_size_array: - return True - return False - - -def cpg_ldlayout_map(ldlayout_dict): - if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']: - ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[ - ldlayout_dict['RAIDType']]['raid_value'] - if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']: - ldlayout_dict['HA'] = getattr( - client.HPE3ParClient, ldlayout_dict['HA']) - return ldlayout_dict - - -def create_cpg( - client_obj, - cpg_name, - domain, - growth_increment, - growth_limit, - growth_warning, - raid_type, - set_size, - high_availability, - disk_type): - try: - if not validate_set_size(raid_type, set_size): - return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type)) - if not client_obj.cpgExists(cpg_name): - - disk_patterns = [] - if disk_type: - disk_type = getattr(client.HPE3ParClient, disk_type) - disk_patterns = [{'diskType': disk_type}] - ld_layout = { - 'RAIDType': raid_type, - 'setSize': set_size, - 'HA': high_availability, - 'diskPatterns': disk_patterns} - ld_layout = cpg_ldlayout_map(ld_layout) - if growth_increment is not None: - growth_increment = hpe3par.convert_to_binary_multiple( - growth_increment) - if growth_limit is not None: - growth_limit = hpe3par.convert_to_binary_multiple( - growth_limit) - if growth_warning is not None: - growth_warning = hpe3par.convert_to_binary_multiple( - growth_warning) - optional = { - 'domain': domain, - 'growthIncrementMiB': growth_increment, - 'growthLimitMiB': growth_limit, - 'usedLDWarningAlertMiB': growth_warning, - 'LDLayout': ld_layout} - client_obj.createCPG(cpg_name, optional) - else: - return (True, False, "CPG already present") - except exceptions.ClientException as e: - return (False, False, "CPG creation failed | %s" % (e)) - return (True, True, "Created CPG %s successfully." % cpg_name) - - -def delete_cpg( - client_obj, - cpg_name): - try: - if client_obj.cpgExists(cpg_name): - client_obj.deleteCPG(cpg_name) - else: - return (True, False, "CPG does not exist") - except exceptions.ClientException as e: - return (False, False, "CPG delete failed | %s" % e) - return (True, True, "Deleted CPG %s successfully." 
% cpg_name) - - -def main(): - module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(), - required_together=[['raid_type', 'set_size']]) - if not HAS_3PARCLIENT: - module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)') - - if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31: - module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters") - - storage_system_ip = module.params["storage_system_ip"] - storage_system_username = module.params["storage_system_username"] - storage_system_password = module.params["storage_system_password"] - cpg_name = module.params["cpg_name"] - domain = module.params["domain"] - growth_increment = module.params["growth_increment"] - growth_limit = module.params["growth_limit"] - growth_warning = module.params["growth_warning"] - raid_type = module.params["raid_type"] - set_size = module.params["set_size"] - high_availability = module.params["high_availability"] - disk_type = module.params["disk_type"] - secure = module.params["secure"] - - wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip - try: - client_obj = client.HPE3ParClient(wsapi_url, secure) - except exceptions.SSLCertFailed: - module.fail_json(msg="SSL Certificate Failed") - except exceptions.ConnectionError: - module.fail_json(msg="Connection Error") - except exceptions.UnsupportedVersion: - module.fail_json(msg="Unsupported WSAPI version") - except Exception as e: - module.fail_json(msg="Initializing client failed. %s" % e) - - if storage_system_username is None or storage_system_password is None: - module.fail_json(msg="Storage system username or password is None") - if cpg_name is None: - module.fail_json(msg="CPG Name is None") - - # States - if module.params["state"] == "present": - try: - client_obj.login(storage_system_username, storage_system_password) - return_status, changed, msg = create_cpg( - client_obj, - cpg_name, - domain, - growth_increment, - growth_limit, - growth_warning, - raid_type, - set_size, - high_availability, - disk_type - ) - except Exception as e: - module.fail_json(msg="CPG create failed | %s" % e) - finally: - client_obj.logout() - - elif module.params["state"] == "absent": - try: - client_obj.login(storage_system_username, storage_system_password) - return_status, changed, msg = delete_cpg( - client_obj, - cpg_name - ) - except Exception as e: - module.fail_json(msg="CPG delete failed | %s" % e) - finally: - client_obj.logout() - - if return_status: - module.exit_json(changed=changed, msg=msg) - else: - module.fail_json(msg=msg) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py b/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py deleted file mode 100644 index 9c5e6c50..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_domain -short_description: Manages domains on IBM Spectrum Accelerate Family storage systems - -description: - - "This module can be used to add domains to or remove them from IBM Spectrum
Accelerate Family storage systems." - -options: - domain: - description: - - Name of the domain to be managed. - required: true - type: str - state: - description: - - The desired state of the domain. - default: "present" - choices: [ "present", "absent" ] - type: str - ldap_id: - description: - - ldap id to add to the domain. - required: false - type: str - size: - description: - - Size of the domain. - required: false - type: str - hard_capacity: - description: - - Hard capacity of the domain. - required: false - type: str - soft_capacity: - description: - - Soft capacity of the domain. - required: false - type: str - max_cgs: - description: - - Number of max cgs. - required: false - type: str - max_dms: - description: - - Number of max dms. - required: false - type: str - max_mirrors: - description: - - Number of max_mirrors. - required: false - type: str - max_pools: - description: - - Number of max_pools. - required: false - type: str - max_volumes: - description: - - Number of max_volumes. - required: false - type: str - perf_class: - description: - - Add the domain to a performance class. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Define new domain. - community.general.ibm_sa_domain: - domain: domain_name - size: domain_size - state: present - username: admin - password: secret - endpoints: hostdev-system - -- name: Delete domain. - community.general.ibm_sa_domain: - domain: domain_name - state: absent - username: admin - password: secret - endpoints: hostdev-system -''' -RETURN = ''' -msg: - description: module return status. - returned: as needed - type: str - sample: "domain 'domain_name' created successfully." -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - domain=dict(required=True), - size=dict(), - max_dms=dict(), - max_cgs=dict(), - ldap_id=dict(), - max_mirrors=dict(), - max_pools=dict(), - max_volumes=dict(), - perf_class=dict(), - hard_capacity=dict(), - soft_capacity=dict() - ) - ) - - module = AnsibleModule(argument_spec) - - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - domain = xcli_client.cmd.domain_list( - domain=module.params['domain']).as_single_element - state = module.params['state'] - - state_changed = False - msg = 'Domain \'{0}\''.format(module.params['domain']) - if state == 'present' and not domain: - state_changed = execute_pyxcli_command( - module, 'domain_create', xcli_client) - msg += " created successfully." - elif state == 'absent' and domain: - state_changed = execute_pyxcli_command( - module, 'domain_delete', xcli_client) - msg += " deleted successfully." - else: - msg += " state unchanged." 
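# The lookup/compare/act skeleton above repeats in every ibm_sa_* module that
# follows: query the object with an XCLI *_list call, then issue the create or
# delete command only when the requested state disagrees with what the array
# reports, so `changed` is True only when a real XCLI command was executed.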
- - module.exit_json(changed=state_changed, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py b/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py deleted file mode 100644 index 27a7287f..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_host -short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems. - -description: - - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems." - -options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host state. - default: "present" - choices: [ "present", "absent" ] - type: str - cluster: - description: - - The name of the cluster to include the host. - required: false - type: str - domain: - description: - - The domains the cluster will be attached to. - To include more than one domain, - separate domain names with commas. - To include all existing domains, use an asterisk ("*"). - required: false - type: str - iscsi_chap_name: - description: - - The host's CHAP name identifier. - required: false - type: str - iscsi_chap_secret: - description: - - The password of the initiator used to - authenticate to the system when CHAP is enabled. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Define new host. - community.general.ibm_sa_host: - host: host_name - state: present - username: admin - password: secret - endpoints: hostdev-system - -- name: Delete host.
- community.general.ibm_sa_host: - host: host_name - state: absent - username: admin - password: secret - endpoints: hostdev-system -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - host=dict(required=True), - cluster=dict(), - domain=dict(), - iscsi_chap_name=dict(), - iscsi_chap_secret=dict(no_log=True), - ) - ) - - module = AnsibleModule(argument_spec) - - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - host = xcli_client.cmd.host_list( - host=module.params['host']).as_single_element - state = module.params['state'] - - state_changed = False - if state == 'present' and not host: - state_changed = execute_pyxcli_command( - module, 'host_define', xcli_client) - elif state == 'absent' and host: - state_changed = execute_pyxcli_command( - module, 'host_delete', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py b/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py deleted file mode 100644 index 32daa9f3..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_host_ports -short_description: Add host ports on IBM Spectrum Accelerate Family storage systems. - -description: - - "This module adds ports to or removes them from the hosts - on IBM Spectrum Accelerate Family storage systems." - -options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host ports state. - default: "present" - choices: [ "present", "absent" ] - type: str - iscsi_name: - description: - - iSCSI initiator name. - required: false - type: str - fcaddress: - description: - - Fiber channel address. - required: false - type: str - num_of_visible_targets: - description: - - Number of visible targets. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Add ports for host. - community.general.ibm_sa_host_ports: - host: test_host - iscsi_name: iqn.1994-05.com*** - username: admin - password: secret - endpoints: hostdev-system - state: present - -- name: Remove ports for host. 
- community.general.ibm_sa_host_ports: - host: test_host - iscsi_name: iqn.1994-05.com*** - username: admin - password: secret - endpoints: hostdev-system - state: absent - -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl, - spectrum_accelerate_spec, is_pyxcli_installed) - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - host=dict(required=True), - iscsi_name=dict(), - fcaddress=dict(), - num_of_visible_targets=dict() - ) - ) - - module = AnsibleModule(argument_spec) - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - # required args - ports = [] - try: - ports = xcli_client.cmd.host_list_ports( - host=module.params.get('host')).as_list - except Exception: - pass - state = module.params['state'] - port_exists = False - ports = [port.get('port_name') for port in ports] - - fc_ports = (module.params.get('fcaddress') - if module.params.get('fcaddress') else []) - iscsi_ports = (module.params.get('iscsi_name') - if module.params.get('iscsi_name') else []) - for port in ports: - if port in iscsi_ports or port in fc_ports: - port_exists = True - break - state_changed = False - if state == 'present' and not port_exists: - state_changed = execute_pyxcli_command( - module, 'host_add_port', xcli_client) - if state == 'absent' and port_exists: - state_changed = execute_pyxcli_command( - module, 'host_remove_port', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py b/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py deleted file mode 100644 index 67c963ac..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_pool -short_description: Handles pools on IBM Spectrum Accelerate Family storage systems. - -description: - - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems" - -options: - pool: - description: - - Pool name. - required: true - type: str - state: - description: - - Pool state. - default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Pool size in GB - required: false - type: str - snapshot_size: - description: - - Pool snapshot size in GB - required: false - type: str - domain: - description: - - Adds the pool to the specified domain. - required: false - type: str - perf_class: - description: - - Assigns a perf_class to the pool. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Create new pool. - community.general.ibm_sa_pool: - name: pool_name - size: 300 - state: present - username: admin - password: secret - endpoints: hostdev-system - -- name: Delete pool. 
- community.general.ibm_sa_pool: - name: pool_name - state: absent - username: admin - password: secret - endpoints: hostdev-system -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - pool=dict(required=True), - size=dict(), - snapshot_size=dict(), - domain=dict(), - perf_class=dict() - ) - ) - - module = AnsibleModule(argument_spec) - - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - pool = xcli_client.cmd.pool_list( - pool=module.params['pool']).as_single_element - state = module.params['state'] - - state_changed = False - if state == 'present' and not pool: - state_changed = execute_pyxcli_command( - module, 'pool_create', xcli_client) - if state == 'absent' and pool: - state_changed = execute_pyxcli_command( - module, 'pool_delete', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py b/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py deleted file mode 100644 index 7820d268..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_vol -short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems. - -description: - - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems." - -options: - vol: - description: - - Volume name. - required: true - type: str - pool: - description: - - Volume pool. - required: false - type: str - state: - description: - - Volume state. - default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Volume size. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Create a new volume. - community.general.ibm_sa_vol: - vol: volume_name - pool: pool_name - size: 17 - state: present - username: admin - password: secret - endpoints: hostdev-system - -- name: Delete an existing volume. 
- community.general.ibm_sa_vol: - vol: volume_name - state: absent - username: admin - password: secret - endpoints: hostdev-system -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - vol=dict(required=True), - pool=dict(), - size=dict() - ) - ) - - module = AnsibleModule(argument_spec) - - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - # required args - volume = xcli_client.cmd.vol_list( - vol=module.params.get('vol')).as_single_element - state = module.params['state'] - - state_changed = False - if state == 'present' and not volume: - state_changed = execute_pyxcli_command( - module, 'vol_create', xcli_client) - elif state == 'absent' and volume: - state_changed = execute_pyxcli_command( - module, 'vol_delete', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py b/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py deleted file mode 100644 index b449ba8d..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (C) 2018 IBM CORPORATION -# Author(s): Tzur Eliyahu -# -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ibm_sa_vol_map -short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems. - -description: - - "This module maps volumes to or unmaps them from the hosts on - IBM Spectrum Accelerate Family storage systems." - -options: - vol: - description: - - Volume name. - required: true - type: str - state: - default: "present" - choices: [ "present", "absent" ] - description: - - When the state is present the volume is mapped. - When the state is absent, the volume is meant to be unmapped. - type: str - - cluster: - description: - - Maps the volume to a cluster. - required: false - type: str - host: - description: - - Maps the volume to a host. - required: false - type: str - lun: - description: - - The LUN identifier. - required: false - type: str - override: - description: - - Overrides the existing volume mapping. - required: false - type: str - -extends_documentation_fragment: -- community.general.ibm_storage - - -author: - - Tzur Eliyahu (@tzure) -''' - -EXAMPLES = ''' -- name: Map volume to host. - community.general.ibm_sa_vol_map: - vol: volume_name - lun: 1 - host: host_name - username: admin - password: secret - endpoints: hostdev-system - state: present - -- name: Map volume to cluster. - community.general.ibm_sa_vol_map: - vol: volume_name - lun: 1 - cluster: cluster_name - username: admin - password: secret - endpoints: hostdev-system - state: present - -- name: Unmap volume. 
- community.general.ibm_sa_vol_map: - host: host_name - username: admin - password: secret - endpoints: hostdev-system - state: absent -''' -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed) - - -def main(): - argument_spec = spectrum_accelerate_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - vol=dict(required=True), - lun=dict(), - cluster=dict(), - host=dict(), - override=dict() - ) - ) - - module = AnsibleModule(argument_spec) - is_pyxcli_installed(module) - - xcli_client = connect_ssl(module) - # required args - mapping = False - try: - mapped_hosts = xcli_client.cmd.vol_mapping_list( - vol=module.params.get('vol')).as_list - for host in mapped_hosts: - if host['host'] == module.params.get("host", ""): - mapping = True - except Exception: - pass - state = module.params['state'] - - state_changed = False - if state == 'present' and not mapping: - state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client) - if state == 'absent' and mapping: - state_changed = execute_pyxcli_command( - module, 'unmap_vol', xcli_client) - - module.exit_json(changed=state_changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/pmem/pmem.py b/ansible_collections/community/general/plugins/modules/storage/pmem/pmem.py deleted file mode 100644 index b91bab5f..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/pmem/pmem.py +++ /dev/null @@ -1,628 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2022, Masayoshi Mizuma -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: - - Masayoshi Mizuma (@mizumm) -module: pmem -short_description: Configure Intel Optane Persistent Memory modules -version_added: 4.5.0 -description: - - This module allows configuring Intel Optane Persistent Memory modules - (PMem) using ipmctl and ndctl command line tools. -requirements: - - ipmctl and ndctl command line tools - - xmltodict -options: - appdirect: - description: - - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)). - - Create AppDirect capacity utilizing hardware interleaving across the - requested PMem modules if applicable given the specified target. - - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100). - type: int - appdirect_interleaved: - description: - - Create AppDirect capacity that is interleaved with any other PMem modules. - type: bool - required: false - default: true - memorymode: - description: - - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)). - type: int - reserved: - description: - - Percentage of the capacity to reserve (C(0)-C(100)). I(reserved) will not be mapped - into the system physical address space and will be presented as reserved - capacity with Show Device and Show Memory Resources Commands. - - I(reserved) will be set automatically if this is not configured. - type: int - required: false - socket: - description: - - This enables setting the configuration for each socket by using the socket ID. - - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100) within one socket.
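The 100-percent rule above is enforced later in percent_check(): I(reserved) defaults to whatever I(appdirect) and I(memorymode) leave over, and an explicit I(reserved) must make the three add up to exactly C(100). A toy illustration of that arithmetic (the 1 TiB total is only a round number for the example):

# Sketch of the capacity split that percent_check() (further below) enforces.
def split_capacity(total_bytes, appdirect, memorymode, reserved=None):
    if reserved is None:
        reserved = 100 - appdirect - memorymode  # remainder, as the module does
    if appdirect + memorymode + reserved != 100:
        raise ValueError('Total percent should be 100.')
    return {name: total_bytes * pct // 100
            for name, pct in (('appdirect', appdirect),
                              ('memorymode', memorymode),
                              ('reserved', reserved))}

print(split_capacity(1 << 40, appdirect=10, memorymode=70))  # reserved -> 20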
- type: list - elements: dict - suboptions: - id: - description: The socket ID of the PMem module. - type: int - required: true - appdirect: - description: - - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)) within the socket ID. - type: int - required: true - appdirect_interleaved: - description: - - Create AppDirect capacity that is interleaved with any other PMem modules within the socket ID. - type: bool - required: false - default: true - memorymode: - description: - - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)) within the socket ID. - type: int - required: true - reserved: - description: - - Percentage of the capacity to reserve (C(0)-C(100)) within the socket ID. - type: int - namespace: - description: - - This enables setting the configuration for the namespace of the PMem. - type: list - elements: dict - suboptions: - mode: - description: - - The mode of namespace. The detail of the mode is in the man page of ndctl-create-namespace. - type: str - required: true - choices: ['raw', 'sector', 'fsdax', 'devdax'] - type: - description: - - The type of namespace. The detail of the type is in the man page of ndctl-create-namespace. - type: str - required: false - choices: ['pmem', 'blk'] - size: - description: - - The size of namespace. This option supports the suffixes C(k) or C(K) or C(KB) for KiB, - C(m) or C(M) or C(MB) for MiB, C(g) or C(G) or C(GB) for GiB and C(t) or C(T) or C(TB) for TiB. - - This option is required if multiple namespaces are configured. - - If this option is not set, all of the available space of a region is configured. - type: str - required: false - namespace_append: - description: - - Enables appending the new namespaces to the system. - - The default is C(false), so all existing namespaces not listed in I(namespace) are removed. - type: bool - default: false - required: false -''' - -RETURN = r''' -reboot_required: - description: Indicates that a system reboot is required to complete the PMem configuration. - returned: success - type: bool - sample: True -result: - description: - - Shows the value of AppDirect, Memory Mode and Reserved size in bytes. - - If I(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID. - - If I(namespace) argument is provided, shows the detail of each namespace. - returned: success - type: list - elements: dict - contains: - appdirect: - description: AppDirect size in bytes. - type: int - memorymode: - description: Memory Mode size in bytes. - type: int - reserved: - description: Reserved size in bytes. - type: int - socket: - description: The socket ID to be configured. - type: int - namespace: - description: The list of namespace details. - type: list - sample: [ - { - "appdirect": 111669149696, - "memorymode": 970662608896, - "reserved": 3626500096, - "socket": 0 - }, - { - "appdirect": 111669149696, - "memorymode": 970662608896, - "reserved": 3626500096, - "socket": 1 - } - ] -''' - -EXAMPLES = r''' -- name: Configure the Pmem as AppDirect 10, Memory Mode 70, and the Reserved 20 percent. - community.general.pmem: - appdirect: 10 - memorymode: 70 - -- name: Configure the Pmem as AppDirect 10, Memory Mode 80, and the Reserved 10 percent. - community.general.pmem: - appdirect: 10 - memorymode: 80 - reserved: 10 - -- name: Configure the Pmem as AppDirect with not interleaved 10, Memory Mode 70, and the Reserved 20 percent.
- community.general.pmem: - appdirect: 10 - appdirect_interleaved: False - memorymode: 70 - -- name: Configure the Pmem each socket. - community.general.pmem: - socket: - - id: 0 - appdirect: 10 - appdirect_interleaved: False - memorymode: 70 - reserved: 20 - - id: 1 - appdirect: 10 - memorymode: 80 - reserved: 10 - -- name: Configure the two namespaces. - community.general.pmem: - namespace: - - size: 1GB - type: pmem - mode: raw - - size: 320MB - type: pmem - mode: sector -''' - -import json -import re -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib, human_to_bytes - -try: - import xmltodict -except ImportError: - HAS_XMLTODICT_LIBRARY = False - XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc() -else: - HAS_XMLTODICT_LIBRARY = True - - -class PersistentMemory(object): - def __init__(self): - module = AnsibleModule( - argument_spec=dict( - appdirect=dict(type='int'), - appdirect_interleaved=dict(type='bool', default=True), - memorymode=dict(type='int'), - reserved=dict(type='int'), - socket=dict( - type='list', elements='dict', - options=dict( - id=dict(required=True, type='int'), - appdirect=dict(required=True, type='int'), - appdirect_interleaved=dict(type='bool', default=True), - memorymode=dict(required=True, type='int'), - reserved=dict(type='int'), - ), - ), - namespace=dict( - type='list', elements='dict', - options=dict( - mode=dict(required=True, type='str', choices=['raw', 'sector', 'fsdax', 'devdax']), - type=dict(type='str', choices=['pmem', 'blk']), - size=dict(type='str'), - ), - ), - namespace_append=dict(type='bool', default=False), - ), - required_together=( - ['appdirect', 'memorymode'], - ), - required_one_of=( - ['appdirect', 'memorymode', 'socket', 'namespace'], - ), - mutually_exclusive=( - ['appdirect', 'socket'], - ['memorymode', 'socket'], - ['appdirect', 'namespace'], - ['memorymode', 'namespace'], - ['socket', 'namespace'], - ['appdirect', 'namespace_append'], - ['memorymode', 'namespace_append'], - ['socket', 'namespace_append'], - ), - ) - - if not HAS_XMLTODICT_LIBRARY: - module.fail_json( - msg=missing_required_lib('xmltodict'), - exception=XMLTODICT_LIBRARY_IMPORT_ERROR) - - self.ipmctl_exec = module.get_bin_path('ipmctl', True) - self.ndctl_exec = module.get_bin_path('ndctl', True) - - self.appdirect = module.params['appdirect'] - self.interleaved = module.params['appdirect_interleaved'] - self.memmode = module.params['memorymode'] - self.reserved = module.params['reserved'] - self.socket = module.params['socket'] - self.namespace = module.params['namespace'] - self.namespace_append = module.params['namespace_append'] - - self.module = module - self.changed = False - self.result = [] - - def pmem_run_command(self, command, returnCheck=True): - # in case command[] has number - cmd = [str(part) for part in command] - - self.module.log(msg='pmem_run_command: execute: %s' % cmd) - - rc, out, err = self.module.run_command(cmd) - - self.module.log(msg='pmem_run_command: result: %s' % out) - - if returnCheck and rc != 0: - self.module.fail_json(msg='Error while running: %s' % - cmd, rc=rc, out=out, err=err) - - return out - - def pmem_run_ipmctl(self, command, returnCheck=True): - - command = [self.ipmctl_exec] + command - - return self.pmem_run_command(command, returnCheck) - - def pmem_run_ndctl(self, command, returnCheck=True): - - command = [self.ndctl_exec] + command - - return self.pmem_run_command(command, returnCheck) - - def pmem_is_dcpmm_installed(self): - # To check this system has dcpmm - command = 
['show', '-system', '-capabilities'] - return self.pmem_run_ipmctl(command) - - def pmem_get_region_align_size(self, region): - aligns = [] - for rg in region: - if rg['align'] not in aligns: - aligns.append(rg['align']) - - return aligns - - def pmem_get_available_region_size(self, region): - available_size = [] - for rg in region: - available_size.append(rg['available_size']) - - return available_size - - def pmem_get_available_region_type(self, region): - types = [] - for rg in region: - if rg['type'] not in types: - types.append(rg['type']) - - return types - - def pmem_argument_check(self): - def namespace_check(self): - command = ['list', '-R'] - out = self.pmem_run_ndctl(command) - if not out: - return 'No available region(s) in this system.' - region = json.loads(out) - - aligns = self.pmem_get_region_align_size(region) - if len(aligns) != 1: - return 'Regions whose alignment sizes differ are not supported.' - - available_size = self.pmem_get_available_region_size(region) - types = self.pmem_get_available_region_type(region) - for ns in self.namespace: - if ns['size']: - try: - size_byte = human_to_bytes(ns['size']) - except ValueError: - return 'The format of size: NNN TB|GB|MB|KB|T|G|M|K|B' - - if size_byte % aligns[0] != 0: - return 'size: %s should be aligned with %d' % (ns['size'], aligns[0]) - - is_space_enough = False - for i, avail in enumerate(available_size): - if avail > size_byte: - available_size[i] -= size_byte - is_space_enough = True - break - - if is_space_enough is False: - return 'There is no available region for size: %s' % ns['size'] - - ns['size_byte'] = size_byte - - elif len(self.namespace) != 1: - return 'size option is required to configure multiple namespaces' - - if ns['type'] not in types: - return 'type %s is not supported in this system. Supported type: %s' % (ns['type'], types) - - return None - - def percent_check(self, appdirect, memmode, reserved=None): - if appdirect is None or (appdirect < 0 or appdirect > 100): - return 'appdirect percent should be from 0 to 100.' - if memmode is None or (memmode < 0 or memmode > 100): - return 'memorymode percent should be from 0 to 100.' - - if reserved is None: - if appdirect + memmode > 100: - return 'Total percent should be less than or equal to 100.' - else: - if reserved < 0 or reserved > 100: - return 'reserved percent should be from 0 to 100.' - if appdirect + memmode + reserved != 100: - return 'Total percent should be 100.' - - def socket_id_check(self): - command = ['show', '-o', 'nvmxml', '-socket'] - out = self.pmem_run_ipmctl(command) - sockets_dict = xmltodict.parse(out, dict_constructor=dict)['SocketList']['Socket'] - socket_ids = [] - for sl in sockets_dict: - socket_ids.append(int(sl['SocketID'], 16)) - - for skt in self.socket: - if skt['id'] not in socket_ids: - return 'Invalid socket number: %d' % skt['id'] - - return None - - if self.namespace: - return namespace_check(self) - elif self.socket is None: - return percent_check(self, self.appdirect, self.memmode, self.reserved) - else: - ret = socket_id_check(self) - if ret is not None: - return ret - - for skt in self.socket: - ret = percent_check( - self, skt['appdirect'], skt['memorymode'], skt['reserved']) - if ret is not None: - return ret - - return None - - def pmem_remove_namespaces(self): - command = ['list', '-N'] - out = self.pmem_run_ndctl(command) - - # There are no namespaces in this system. Nothing to do.
- if not out: - return - - namespaces = json.loads(out) - - # Disable and destroy all namespaces - for ns in namespaces: - command = ['disable-namespace', ns['dev']] - self.pmem_run_ndctl(command) - - command = ['destroy-namespace', ns['dev']] - self.pmem_run_ndctl(command) - - return - - def pmem_delete_goal(self): - # delete the goal request - command = ['delete', '-goal'] - self.pmem_run_ipmctl(command) - - def pmem_init_env(self): - if self.namespace is None or (self.namespace and self.namespace_append is False): - self.pmem_remove_namespaces() - if self.namespace is None: - self.pmem_delete_goal() - - def pmem_get_capacity(self, skt=None): - command = ['show', '-d', 'Capacity', '-u', 'B', '-o', 'nvmxml', '-dimm'] - if skt: - command += ['-socket', skt['id']] - out = self.pmem_run_ipmctl(command) - - dimm_list = xmltodict.parse(out, dict_constructor=dict)['DimmList']['Dimm'] - capacity = 0 - for entry in dimm_list: - for key, v in entry.items(): - if key == 'Capacity': - capacity += int(v.split()[0]) - - return capacity - - def pmem_create_memory_allocation(self, skt=None): - def build_ipmctl_creation_opts(self, skt=None): - ipmctl_opts = [] - - if skt: - appdirect = skt['appdirect'] - memmode = skt['memorymode'] - reserved = skt['reserved'] - socket_id = skt['id'] - ipmctl_opts += ['-socket', socket_id] - else: - appdirect = self.appdirect - memmode = self.memmode - reserved = self.reserved - - if reserved is None: - res = 100 - memmode - appdirect - ipmctl_opts += ['memorymode=%d' % memmode, 'reserved=%d' % res] - else: - ipmctl_opts += ['memorymode=%d' % memmode, 'reserved=%d' % reserved] - - if self.interleaved: - ipmctl_opts += ['PersistentMemoryType=AppDirect'] - else: - ipmctl_opts += ['PersistentMemoryType=AppDirectNotInterleaved'] - - return ipmctl_opts - - def is_allocation_good(self, ipmctl_out, command): - warning = re.compile('WARNING') - error = re.compile('.*Error.*') - ignore_error = re.compile( - 'Do you want to continue? [y/n] Error: Invalid data input.') - - errmsg = '' - rc = True - for line in ipmctl_out.splitlines(): - if warning.match(line): - errmsg = '%s (command: %s)' % (line, command) - rc = False - break - elif error.match(line): - if not ignore_error: - errmsg = '%s (command: %s)' % (line, command) - rc = False - break - - return rc, errmsg - - def get_allocation_result(self, goal, skt=None): - ret = {'appdirect': 0, 'memorymode': 0} - - if skt: - ret['socket'] = skt['id'] - - out = xmltodict.parse(goal, dict_constructor=dict)['ConfigGoalList']['ConfigGoal'] - for entry in out: - - # Probably it's a bug of ipmctl to show the socket goal - # which isn't specified by the -socket option. - # Anyway, filter the noise out here: - if skt and skt['id'] != int(entry['SocketID'], 16): - continue - - for key, v in entry.items(): - if key == 'MemorySize': - ret['memorymode'] += int(v.split()[0]) - elif key == 'AppDirect1Size' or key == 'AapDirect2Size': - ret['appdirect'] += int(v.split()[0]) - - capacity = self.pmem_get_capacity(skt) - ret['reserved'] = capacity - ret['appdirect'] - ret['memorymode'] - - return ret - - reboot_required = False - - ipmctl_opts = build_ipmctl_creation_opts(self, skt) - - # First, do dry run ipmctl create command to check the error and warning. 
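# (Without -force, `ipmctl create -goal` stops at its interactive
# "Do you want to continue? [y/n]" prompt and fails with "Invalid data input"
# when nothing answers; is_allocation_good() above deliberately ignores that
# noise and only surfaces real warnings and errors, so the forced creation
# below runs only when the dry run came back clean.)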
- command = ['create', '-goal'] + ipmctl_opts - out = self.pmem_run_ipmctl(command, returnCheck=False) - rc, errmsg = is_allocation_good(self, out, command) - if rc is False: - return reboot_required, {}, errmsg - - # Run actual creation here - command = ['create', '-u', 'B', '-o', 'nvmxml', '-force', '-goal'] + ipmctl_opts - goal = self.pmem_run_ipmctl(command) - ret = get_allocation_result(self, goal, skt) - reboot_required = True - - return reboot_required, ret, '' - - def pmem_config_namespaces(self, namespace): - command = ['create-namespace', '-m', namespace['mode']] - if namespace['type']: - command += ['-t', namespace['type']] - if 'size_byte' in namespace: - command += ['-s', namespace['size_byte']] - - self.pmem_run_ndctl(command) - - return None - - -def main(): - - pmem = PersistentMemory() - - pmem.pmem_is_dcpmm_installed() - - error = pmem.pmem_argument_check() - if error: - pmem.module.fail_json(msg=error) - - pmem.pmem_init_env() - pmem.changed = True - - if pmem.namespace: - for ns in pmem.namespace: - pmem.pmem_config_namespaces(ns) - - command = ['list', '-N'] - out = pmem.pmem_run_ndctl(command) - all_ns = json.loads(out) - - pmem.result = all_ns - reboot_required = False - elif pmem.socket is None: - reboot_required, ret, errmsg = pmem.pmem_create_memory_allocation() - if errmsg: - pmem.module.fail_json(msg=errmsg) - pmem.result.append(ret) - else: - for skt in pmem.socket: - skt_reboot_required, skt_ret, skt_errmsg = pmem.pmem_create_memory_allocation(skt) - - if skt_errmsg: - pmem.module.fail_json(msg=skt_errmsg) - - if skt_reboot_required: - reboot_required = True - - pmem.result.append(skt_ret) - - pmem.module.exit_json( - changed=pmem.changed, - reboot_required=reboot_required, - result=pmem.result - ) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py b/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py deleted file mode 100644 index 54bb8c29..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py +++ /dev/null @@ -1,209 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: vexata_eg -short_description: Manage export groups on Vexata VX100 storage arrays -description: - - Create or delete export groups on a Vexata VX100 array. - - An export group is a tuple of a volume group, initiator group and port - group that allows a set of volumes to be exposed to one or more hosts - through specific array ports. -author: - - Sandeep Kasargod (@vexata) -options: - name: - description: - - Export group name. - required: true - type: str - state: - description: - - Creates export group when present or delete when absent. - default: present - choices: [ present, absent ] - type: str - vg: - description: - - Volume group name. - type: str - ig: - description: - - Initiator group name. - type: str - pg: - description: - - Port group name. - type: str -extends_documentation_fragment: -- community.general.vexata.vx100 - -''' - -EXAMPLES = r''' -- name: Create export group named db_export. 
- community.general.vexata_eg: - name: db_export - vg: dbvols - ig: dbhosts - pg: pg1 - state: present - array: vx100_ultra.test.com - user: admin - password: secret - -- name: Delete export group named db_export - community.general.vexata_eg: - name: db_export - state: absent - array: vx100_ultra.test.com - user: admin - password: secret -''' - -RETURN = r''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.vexata import ( - argument_spec, get_array, required_together) - - -def get_eg(module, array): - """Retrieve a named vg if it exists, None if absent.""" - name = module.params['name'] - try: - egs = array.list_egs() - eg = filter(lambda eg: eg['name'] == name, egs) - if len(eg) == 1: - return eg[0] - else: - return None - except Exception: - module.fail_json(msg='Error while attempting to retrieve export groups.') - - -def get_vg_id(module, array): - """Retrieve a named vg's id if it exists, error if absent.""" - name = module.params['vg'] - try: - vgs = array.list_vgs() - vg = filter(lambda vg: vg['name'] == name, vgs) - if len(vg) == 1: - return vg[0]['id'] - else: - module.fail_json(msg='Volume group {0} was not found.'.format(name)) - except Exception: - module.fail_json(msg='Error while attempting to retrieve volume groups.') - - -def get_ig_id(module, array): - """Retrieve a named ig's id if it exists, error if absent.""" - name = module.params['ig'] - try: - igs = array.list_igs() - ig = filter(lambda ig: ig['name'] == name, igs) - if len(ig) == 1: - return ig[0]['id'] - else: - module.fail_json(msg='Initiator group {0} was not found.'.format(name)) - except Exception: - module.fail_json(msg='Error while attempting to retrieve initiator groups.') - - -def get_pg_id(module, array): - """Retrieve a named pg's id if it exists, error if absent.""" - name = module.params['pg'] - try: - pgs = array.list_pgs() - pg = filter(lambda pg: pg['name'] == name, pgs) - if len(pg) == 1: - return pg[0]['id'] - else: - module.fail_json(msg='Port group {0} was not found.'.format(name)) - except Exception: - module.fail_json(msg='Error while attempting to retrieve port groups.') - - -def create_eg(module, array): - """"Create a new export group.""" - changed = False - eg_name = module.params['name'] - vg_id = get_vg_id(module, array) - ig_id = get_ig_id(module, array) - pg_id = get_pg_id(module, array) - if module.check_mode: - module.exit_json(changed=changed) - - try: - eg = array.create_eg( - eg_name, - 'Ansible export group', - (vg_id, ig_id, pg_id)) - if eg: - module.log(msg='Created export group {0}'.format(eg_name)) - changed = True - else: - raise Exception - except Exception: - module.fail_json(msg='Export group {0} create failed.'.format(eg_name)) - module.exit_json(changed=changed) - - -def delete_eg(module, array, eg): - changed = False - eg_name = eg['name'] - if module.check_mode: - module.exit_json(changed=changed) - - try: - ok = array.delete_eg( - eg['id']) - if ok: - module.log(msg='Export group {0} deleted.'.format(eg_name)) - changed = True - else: - raise Exception - except Exception: - module.fail_json(msg='Export group {0} delete failed.'.format(eg_name)) - module.exit_json(changed=changed) - - -def main(): - arg_spec = argument_spec() - arg_spec.update( - dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - vg=dict(type='str'), - ig=dict(type='str'), - pg=dict(type='str') - ) - ) - - module = AnsibleModule(arg_spec, - 
supports_check_mode=True, - required_together=required_together()) - - state = module.params['state'] - array = get_array(module) - eg = get_eg(module, array) - - if state == 'present' and not eg: - create_eg(module, array) - elif state == 'absent' and eg: - delete_eg(module, array, eg) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py b/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py deleted file mode 100644 index 1cf4cd7b..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: vexata_volume -short_description: Manage volumes on Vexata VX100 storage arrays -description: - - Create, deletes or extend volumes on a Vexata VX100 array. -author: -- Sandeep Kasargod (@vexata) -options: - name: - description: - - Volume name. - required: true - type: str - state: - description: - - Creates/Modifies volume when present or removes when absent. - default: present - choices: [ present, absent ] - type: str - size: - description: - - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes. - type: str -extends_documentation_fragment: -- community.general.vexata.vx100 - -''' - -EXAMPLES = r''' -- name: Create new 2 TiB volume named foo - community.general.vexata_volume: - name: foo - size: 2T - state: present - array: vx100_ultra.test.com - user: admin - password: secret - -- name: Expand volume named foo to 4 TiB - community.general.vexata_volume: - name: foo - size: 4T - state: present - array: vx100_ultra.test.com - user: admin - password: secret - -- name: Delete volume named foo - community.general.vexata_volume: - name: foo - state: absent - array: vx100_ultra.test.com - user: admin - password: secret -''' - -RETURN = r''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.vexata import ( - argument_spec, get_array, required_together, size_to_MiB) - - -def get_volume(module, array): - """Retrieve a named volume if it exists, None if absent.""" - name = module.params['name'] - try: - vols = array.list_volumes() - vol = filter(lambda v: v['name'] == name, vols) - if len(vol) == 1: - return vol[0] - else: - return None - except Exception: - module.fail_json(msg='Error while attempting to retrieve volumes.') - - -def validate_size(module, err_msg): - size = module.params.get('size', False) - if not size: - module.fail_json(msg=err_msg) - size = size_to_MiB(size) - if size <= 0: - module.fail_json(msg='Invalid volume size, must be [MGT].') - return size - - -def create_volume(module, array): - """"Create a new volume.""" - changed = False - size = validate_size(module, err_msg='Size is required to create volume.') - if module.check_mode: - module.exit_json(changed=changed) - - try: - vol = array.create_volume( - module.params['name'], - 'Ansible volume', - size) - if vol: - module.log(msg='Created volume {0}'.format(vol['id'])) - changed = True - else: - module.fail_json(msg='Volume create failed.') - except Exception: - pass - 
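# (validate_size() above funnels the documented M/G/T size strings through
# size_to_MiB(), so a request like '2T' becomes 2 * 1024 * 1024 MiB; anything
# that does not parse to a positive MiB count fails before the array is
# called.)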
module.exit_json(changed=changed) - - -def update_volume(module, array, volume): - """Expand the volume size.""" - changed = False - size = validate_size(module, err_msg='Size is required to update volume') - prev_size = volume['volSize'] - if size <= prev_size: - module.log(msg='Volume expanded size needs to be larger ' - 'than current size.') - if module.check_mode: - module.exit_json(changed=changed) - - try: - vol = array.grow_volume( - volume['name'], - volume['description'], - volume['id'], - size) - if vol: - changed = True - except Exception: - pass - - module.exit_json(changed=changed) - - -def delete_volume(module, array, volume): - changed = False - vol_name = volume['name'] - if module.check_mode: - module.exit_json(changed=changed) - - try: - ok = array.delete_volume( - volume['id']) - if ok: - module.log(msg='Volume {0} deleted.'.format(vol_name)) - changed = True - else: - raise Exception - except Exception: - pass - module.exit_json(changed=changed) - - -def main(): - arg_spec = argument_spec() - arg_spec.update( - dict( - name=dict(type='str', required=True), - state=dict(default='present', choices=['present', 'absent']), - size=dict(type='str') - ) - ) - - module = AnsibleModule(arg_spec, - supports_check_mode=True, - required_together=required_together()) - - state = module.params['state'] - array = get_array(module) - volume = get_volume(module, array) - - if state == 'present': - if not volume: - create_volume(module, array) - else: - update_volume(module, array, volume) - elif state == 'absent' and volume: - delete_volume(module, array, volume) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py b/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py deleted file mode 100644 index a804753a..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Johan Wiren -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: zfs -short_description: Manage zfs -description: - - Manages ZFS file systems, volumes, clones and snapshots -options: - name: - description: - - File system, snapshot or volume name e.g. C(rpool/myfs). - required: true - type: str - state: - description: - - Whether to create (C(present)), or remove (C(absent)) a - file system, snapshot or volume. All parents/children - will be created/destroyed as needed to reach the desired state. - choices: [ absent, present ] - required: true - type: str - origin: - description: - - Snapshot from which to create a clone. - type: str - extra_zfs_properties: - description: - - A dictionary of zfs properties to be set. - - See the zfs(8) man page for more information. - type: dict -notes: - - C(check_mode) is supported, but in certain situations it may report a task - as changed that will not be reported as changed when C(check_mode) is disabled. - For example, this might occur when the zpool C(altroot) option is set or when - a size is written using human-readable notation, such as C(1M) or C(1024K), - instead of as an unqualified byte count, such as C(1048576). 
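# [Editor's sketch, not part of the diff] The note above warns that check_mode
# can report spurious changes when a size is written as C(1M) or C(1024K)
# rather than a byte count: the requested value is compared as a string with
# what `zfs get -p` reports, so '1M' != '1048576' even though both describe
# the same size. A hypothetical normalizer (to_bytes is not part of the
# module) that would make such comparisons unit-insensitive:

def to_bytes(value):
    # Convert '1M' / '1024K' style sizes to an unqualified byte count.
    units = {'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}
    suffix = value[-1].upper()
    if suffix in units:
        return int(float(value[:-1]) * units[suffix])
    return int(value)

assert to_bytes('1M') == to_bytes('1024K') == to_bytes('1048576') == 1048576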
-author: -- Johan Wiren (@johanwiren) -''' - -EXAMPLES = ''' -- name: Create a new file system called myfs in pool rpool with the setuid property turned off - community.general.zfs: - name: rpool/myfs - state: present - extra_zfs_properties: - setuid: off - -- name: Create a new volume called myvol in pool rpool. - community.general.zfs: - name: rpool/myvol - state: present - extra_zfs_properties: - volsize: 10M - -- name: Create a snapshot of rpool/myfs file system. - community.general.zfs: - name: rpool/myfs@mysnapshot - state: present - -- name: Create a new file system called myfs2 with snapdir enabled - community.general.zfs: - name: rpool/myfs2 - state: present - extra_zfs_properties: - snapdir: enabled - -- name: Create a new file system by cloning a snapshot - community.general.zfs: - name: rpool/cloned_fs - state: present - origin: rpool/myfs@mysnapshot - -- name: Destroy a filesystem - community.general.zfs: - name: rpool/myfs - state: absent -''' - -import os - -from ansible.module_utils.basic import AnsibleModule - - -class Zfs(object): - - def __init__(self, module, name, properties): - self.module = module - self.name = name - self.properties = properties - self.changed = False - self.zfs_cmd = module.get_bin_path('zfs', True) - self.zpool_cmd = module.get_bin_path('zpool', True) - self.pool = name.split('/')[0].split('@')[0] - self.is_solaris = os.uname()[0] == 'SunOS' - self.is_openzfs = self.check_openzfs() - self.enhanced_sharing = self.check_enhanced_sharing() - - def check_openzfs(self): - cmd = [self.zpool_cmd] - cmd.extend(['get', 'version']) - cmd.append(self.pool) - (rc, out, err) = self.module.run_command(cmd, check_rc=True) - version = out.splitlines()[-1].split()[2] - if version == '-': - return True - if int(version) == 5000: - return True - return False - - def check_enhanced_sharing(self): - if self.is_solaris and not self.is_openzfs: - cmd = [self.zpool_cmd] - cmd.extend(['get', 'version']) - cmd.append(self.pool) - (rc, out, err) = self.module.run_command(cmd, check_rc=True) - version = out.splitlines()[-1].split()[2] - if int(version) >= 34: - return True - return False - - def exists(self): - cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name] - (rc, out, err) = self.module.run_command(' '.join(cmd)) - if rc == 0: - return True - else: - return False - - def create(self): - if self.module.check_mode: - self.changed = True - return - properties = self.properties - origin = self.module.params.get('origin', None) - cmd = [self.zfs_cmd] - - if "@" in self.name: - action = 'snapshot' - elif origin: - action = 'clone' - else: - action = 'create' - - cmd.append(action) - - if action in ['create', 'clone']: - cmd += ['-p'] - - if properties: - for prop, value in properties.items(): - if prop == 'volsize': - cmd += ['-V', value] - elif prop == 'volblocksize': - cmd += ['-b', value] - else: - cmd += ['-o', '%s="%s"' % (prop, value)] - if origin and action == 'clone': - cmd.append(origin) - cmd.append(self.name) - (rc, out, err) = self.module.run_command(' '.join(cmd)) - if rc == 0: - self.changed = True - else: - self.module.fail_json(msg=err) - - def destroy(self): - if self.module.check_mode: - self.changed = True - return - cmd = [self.zfs_cmd, 'destroy', '-R', self.name] - (rc, out, err) = self.module.run_command(' '.join(cmd)) - if rc == 0: - self.changed = True - else: - self.module.fail_json(msg=err) - - def set_property(self, prop, value): - if self.module.check_mode: - self.changed = True - return - cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), 
self.name] - (rc, out, err) = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg=err) - - def set_properties_if_changed(self): - diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}} - current_properties = self.get_current_properties() - for prop, value in self.properties.items(): - current_value = current_properties.get(prop, None) - if current_value != value: - self.set_property(prop, value) - diff['before']['extra_zfs_properties'][prop] = current_value - diff['after']['extra_zfs_properties'][prop] = value - if self.module.check_mode: - return diff - updated_properties = self.get_current_properties() - for prop in self.properties: - value = updated_properties.get(prop, None) - if value is None: - self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop) - if current_properties.get(prop, None) != value: - self.changed = True - if prop in diff['after']['extra_zfs_properties']: - diff['after']['extra_zfs_properties'][prop] = value - return diff - - def get_current_properties(self): - cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"] - if self.enhanced_sharing: - cmd += ['-e'] - cmd += ['all', self.name] - rc, out, err = self.module.run_command(" ".join(cmd)) - properties = dict() - for line in out.splitlines(): - prop, value, source = line.split('\t') - # include source '-' so that creation-only properties are not removed - # to avoids errors when the dataset already exists and the property is not changed - # this scenario is most likely when the same playbook is run more than once - if source == 'local' or source == 'received' or source == '-': - properties[prop] = value - # Add alias for enhanced sharing properties - if self.enhanced_sharing: - properties['sharenfs'] = properties.get('share.nfs', None) - properties['sharesmb'] = properties.get('share.smb', None) - return properties - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['absent', 'present']), - origin=dict(type='str', default=None), - extra_zfs_properties=dict(type='dict', default={}), - ), - supports_check_mode=True, - ) - - state = module.params.get('state') - name = module.params.get('name') - - if module.params.get('origin') and '@' in name: - module.fail_json(msg='cannot specify origin when operating on a snapshot') - - # Reverse the boolification of zfs properties - for prop, value in module.params['extra_zfs_properties'].items(): - if isinstance(value, bool): - if value is True: - module.params['extra_zfs_properties'][prop] = 'on' - else: - module.params['extra_zfs_properties'][prop] = 'off' - else: - module.params['extra_zfs_properties'][prop] = value - - result = dict( - name=name, - state=state, - ) - - zfs = Zfs(module, name, module.params['extra_zfs_properties']) - - if state == 'present': - if zfs.exists(): - result['diff'] = zfs.set_properties_if_changed() - else: - zfs.create() - result['diff'] = {'before': {'state': 'absent'}, 'after': {'state': state}} - - elif state == 'absent': - if zfs.exists(): - zfs.destroy() - result['diff'] = {'before': {'state': 'present'}, 'after': {'state': state}} - else: - result['diff'] = {} - - result['diff']['before_header'] = name - result['diff']['after_header'] = name - - result.update(zfs.properties) - result['changed'] = zfs.changed - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git 
a/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py b/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py deleted file mode 100644 index ead40411..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py +++ /dev/null @@ -1,265 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Nate Coraor -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: zfs_delegate_admin -short_description: Manage ZFS delegated administration (user admin privileges) -description: - - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS - operations normally restricted to the superuser. - - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options. - - This module attempts to adhere to the behavior of the command line tool as much as possible. -requirements: - - "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all - versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0." -options: - name: - description: - - File system or volume name e.g. C(rpool/myfs). - required: true - type: str - state: - description: - - Whether to allow (C(present)), or unallow (C(absent)) a permission. - - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) are required. - - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified. - choices: [ absent, present ] - default: present - type: str - users: - description: - - List of users to whom permission(s) should be granted. - type: list - elements: str - groups: - description: - - List of groups to whom permission(s) should be granted. - type: list - elements: str - everyone: - description: - - Apply permissions to everyone. - type: bool - default: no - permissions: - description: - - The list of permission(s) to delegate (required if C(state) is C(present)). - - Supported permissions depend on the ZFS version in use. See for example - U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS. - type: list - elements: str - local: - description: - - Apply permissions to C(name) locally (C(zfs allow -l)). - type: bool - descendents: - description: - - Apply permissions to C(name)'s descendents (C(zfs allow -d)). - type: bool - recursive: - description: - - Unallow permissions recursively (ignored when C(state) is C(present)). 
- type: bool - default: no -author: -- Nate Coraor (@natefoo) -''' - -EXAMPLES = r''' -- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope - community.general.zfs_delegate_admin: - name: rpool/myfs - users: adm - permissions: allow,unallow - -- name: Grant `zfs send` to everyone, plus the group `backup` - community.general.zfs_delegate_admin: - name: rpool/myvol - groups: backup - everyone: yes - permissions: send - -- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only - community.general.zfs_delegate_admin: - name: rpool/myfs - users: foo,bar - permissions: send,receive - local: yes - -- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain) - community.general.zfs_delegate_admin: - name: rpool/myfs - everyone: yes - state: absent -''' - -# This module does not return anything other than the standard -# changed/state/msg/stdout -RETURN = ''' -''' - -from itertools import product - -from ansible.module_utils.basic import AnsibleModule - - -class ZfsDelegateAdmin(object): - def __init__(self, module): - self.module = module - self.name = module.params.get('name') - self.state = module.params.get('state') - self.users = module.params.get('users') - self.groups = module.params.get('groups') - self.everyone = module.params.get('everyone') - self.perms = module.params.get('permissions') - self.scope = None - self.changed = False - self.initial_perms = None - self.subcommand = 'allow' - self.recursive_opt = [] - self.run_method = self.update - - self.setup(module) - - def setup(self, module): - """ Validate params and set up for run. - """ - if self.state == 'absent': - self.subcommand = 'unallow' - if module.params.get('recursive'): - self.recursive_opt = ['-r'] - - local = module.params.get('local') - descendents = module.params.get('descendents') - if (local and descendents) or (not local and not descendents): - self.scope = 'ld' - elif local: - self.scope = 'l' - elif descendents: - self.scope = 'd' - else: - self.module.fail_json(msg='Impossible value for local and descendents') - - if not (self.users or self.groups or self.everyone): - if self.state == 'present': - self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set') - elif self.state == 'absent': - self.run_method = self.clear - # ansible ensures the else cannot happen here - - self.zfs_path = module.get_bin_path('zfs', True) - - @property - def current_perms(self): - """ Parse the output of `zfs allow ` to retrieve current permissions. - """ - out = self.run_zfs_raw(subcommand='allow') - perms = { - 'l': {'u': {}, 'g': {}, 'e': []}, - 'd': {'u': {}, 'g': {}, 'e': []}, - 'ld': {'u': {}, 'g': {}, 'e': []}, - } - linemap = { - 'Local permissions:': 'l', - 'Descendent permissions:': 'd', - 'Local+Descendent permissions:': 'ld', - } - scope = None - for line in out.splitlines(): - scope = linemap.get(line, scope) - if not scope: - continue - try: - if line.startswith('\tuser ') or line.startswith('\tgroup '): - ent_type, ent, cur_perms = line.split() - perms[scope][ent_type[0]][ent] = cur_perms.split(',') - elif line.startswith('\teveryone '): - perms[scope]['e'] = line.split()[1].split(',') - except ValueError: - self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line) - return perms - - def run_zfs_raw(self, subcommand=None, args=None): - """ Run a raw zfs command, fail on error. 
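# [Editor's sketch, not part of the diff] current_perms above recovers the
# permission table by scanning `zfs allow` output: header lines select the
# scope, and tab-indented user/group lines fill it in. The same parse on a
# hard-coded sample, runnable standalone:

sample = (
    '---- Permissions on rpool/myfs ----\n'
    'Local+Descendent permissions:\n'
    '\tuser adm allow,unallow\n'
    '\tgroup backup send\n'
)
linemap = {
    'Local permissions:': 'l',
    'Descendent permissions:': 'd',
    'Local+Descendent permissions:': 'ld',
}
perms = {scope: {'u': {}, 'g': {}, 'e': []} for scope in ('l', 'd', 'ld')}
scope = None
for line in sample.splitlines():
    scope = linemap.get(line, scope)
    if scope and line.startswith(('\tuser ', '\tgroup ')):
        ent_type, ent, cur_perms = line.split()
        perms[scope][ent_type[0]][ent] = cur_perms.split(',')
# perms['ld'] == {'u': {'adm': ['allow', 'unallow']},
#                 'g': {'backup': ['send']}, 'e': []}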
- """ - cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name] - rc, out, err = self.module.run_command(cmd) - if rc: - self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err)) - return out - - def run_zfs(self, args): - """ Run zfs allow/unallow with appropriate options as per module arguments. - """ - args = self.recursive_opt + ['-' + self.scope] + args - if self.perms: - args.append(','.join(self.perms)) - return self.run_zfs_raw(args=args) - - def clear(self): - """ Called by run() to clear all permissions. - """ - changed = False - stdout = '' - for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')): - for ent in self.initial_perms[scope][ent_type].keys(): - stdout += self.run_zfs(['-%s' % ent_type, ent]) - changed = True - for scope in ('ld', 'l', 'd'): - if self.initial_perms[scope]['e']: - stdout += self.run_zfs(['-e']) - changed = True - return (changed, stdout) - - def update(self): - """ Update permissions as per module arguments. - """ - stdout = '' - for ent_type, entities in (('u', self.users), ('g', self.groups)): - if entities: - stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)]) - if self.everyone: - stdout += self.run_zfs(['-e']) - return (self.initial_perms != self.current_perms, stdout) - - def run(self): - """ Run an operation, return results for Ansible. - """ - exit_args = {'state': self.state} - self.initial_perms = self.current_perms - exit_args['changed'], stdout = self.run_method() - if exit_args['changed']: - exit_args['msg'] = 'ZFS delegated admin permissions updated' - exit_args['stdout'] = stdout - self.module.exit_json(**exit_args) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - users=dict(type='list', elements='str'), - groups=dict(type='list', elements='str'), - everyone=dict(type='bool', default=False), - permissions=dict(type='list', elements='str'), - local=dict(type='bool'), - descendents=dict(type='bool'), - recursive=dict(type='bool', default=False), - ), - supports_check_mode=False, - required_if=[('state', 'present', ['permissions'])], - ) - zfs_delegate_admin = ZfsDelegateAdmin(module) - zfs_delegate_admin.run() - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py b/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py deleted file mode 100644 index cb106de1..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: zfs_facts -short_description: Gather facts about ZFS datasets. -description: - - Gather facts from ZFS dataset properties. -author: Adam Števko (@xen0l) -options: - name: - description: - - ZFS dataset name. - required: yes - aliases: [ "ds", "dataset" ] - type: str - recurse: - description: - - Specifies if properties for any children should be recursively - displayed. - type: bool - default: 'no' - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. 
- type: bool - default: 'no' - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zfs(1M) man page. - default: all - type: str - type: - description: - - Specifies which datasets types to display. Multiple values have to be - provided in comma-separated form. - choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ] - default: all - type: str - depth: - description: - - Specifies recursion depth. - type: int -''' - -EXAMPLES = ''' -- name: Gather facts about ZFS dataset rpool/export/home - community.general.zfs_facts: - dataset: rpool/export/home - -- name: Report space usage on ZFS filesystems under data/home - community.general.zfs_facts: - name: data/home - recurse: yes - type: filesystem - -- ansible.builtin.debug: - msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.' - with_items: '{{ ansible_zfs_datasets }}' -''' - -RETURN = ''' -name: - description: ZFS dataset name - returned: always - type: str - sample: rpool/var/spool -parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: True -recurse: - description: if we should recurse over ZFS dataset - returned: if 'recurse' is set to True - type: bool - sample: True -zfs_datasets: - description: ZFS dataset facts - returned: always - type: str - sample: - { - "aclinherit": "restricted", - "aclmode": "discard", - "atime": "on", - "available": "43.8G", - "canmount": "on", - "casesensitivity": "sensitive", - "checksum": "on", - "compression": "off", - "compressratio": "1.00x", - "copies": "1", - "creation": "Thu Jun 16 11:37 2016", - "dedup": "off", - "devices": "on", - "exec": "on", - "filesystem_count": "none", - "filesystem_limit": "none", - "logbias": "latency", - "logicalreferenced": "18.5K", - "logicalused": "3.45G", - "mlslabel": "none", - "mounted": "yes", - "mountpoint": "/rpool", - "name": "rpool", - "nbmand": "off", - "normalization": "none", - "org.openindiana.caiman:install": "ready", - "primarycache": "all", - "quota": "none", - "readonly": "off", - "recordsize": "128K", - "redundant_metadata": "all", - "refcompressratio": "1.00x", - "referenced": "29.5K", - "refquota": "none", - "refreservation": "none", - "reservation": "none", - "secondarycache": "all", - "setuid": "on", - "sharenfs": "off", - "sharesmb": "off", - "snapdir": "hidden", - "snapshot_count": "none", - "snapshot_limit": "none", - "sync": "standard", - "type": "filesystem", - "used": "4.41G", - "usedbychildren": "4.41G", - "usedbydataset": "29.5K", - "usedbyrefreservation": "0", - "usedbysnapshots": "0", - "utf8only": "off", - "version": "5", - "vscan": "off", - "written": "29.5K", - "xattr": "on", - "zoned": "off" - } -''' - -from collections import defaultdict - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems - - -SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] - - -class ZFSFacts(object): - def __init__(self, module): - - self.module = module - - self.name = module.params['name'] - self.recurse = module.params['recurse'] - self.parsable = module.params['parsable'] - self.properties = module.params['properties'] - self.type = module.params['type'] - self.depth = module.params['depth'] - - self._datasets = defaultdict(dict) - self.facts = [] - - def dataset_exists(self): - cmd = [self.module.get_bin_path('zfs'), 'list', self.name] - - (rc, 
out, err) = self.module.run_command(cmd) - - if rc == 0: - return True - else: - return False - - def get_facts(self): - cmd = [self.module.get_bin_path('zfs'), 'get', '-H'] - if self.parsable: - cmd.append('-p') - if self.recurse: - cmd.append('-r') - if int(self.depth) != 0: - cmd.append('-d') - cmd.append('%s' % self.depth) - if self.type: - cmd.append('-t') - cmd.append(self.type) - cmd.extend(['-o', 'name,property,value', self.properties, self.name]) - - (rc, out, err) = self.module.run_command(cmd) - - if rc == 0: - for line in out.splitlines(): - dataset, property, value = line.split('\t') - - self._datasets[dataset].update({property: value}) - - for k, v in iteritems(self._datasets): - v.update({'name': k}) - self.facts.append(v) - - return {'ansible_zfs_datasets': self.facts} - else: - self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name, - stderr=err, - rc=rc) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, aliases=['ds', 'dataset'], type='str'), - recurse=dict(required=False, default=False, type='bool'), - parsable=dict(required=False, default=False, type='bool'), - properties=dict(required=False, default='all', type='str'), - type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES), - depth=dict(required=False, default=0, type='int') - ), - supports_check_mode=True - ) - - zfs_facts = ZFSFacts(module) - - result = {} - result['changed'] = False - result['name'] = zfs_facts.name - - if zfs_facts.parsable: - result['parsable'] = zfs_facts.parsable - - if zfs_facts.recurse: - result['recurse'] = zfs_facts.recurse - - if zfs_facts.dataset_exists(): - result['ansible_facts'] = zfs_facts.get_facts() - else: - module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py b/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py deleted file mode 100644 index b7a66255..00000000 --- a/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: zpool_facts -short_description: Gather facts about ZFS pools. -description: - - Gather facts from ZFS pool properties. -author: Adam Števko (@xen0l) -options: - name: - description: - - ZFS pool name. - type: str - aliases: [ "pool", "zpool" ] - required: false - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. - type: bool - default: False - required: false - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zpool(1M) man page. 
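# [Editor's sketch, not part of the diff] get_facts in zfs_facts (just above)
# asks `zfs get` for tab-separated name/property/value triples and folds them
# into one dict per dataset; zpool_facts below does the same per pool. The
# core of that fold, on canned output:

from collections import defaultdict

out = (
    'rpool/export/home\tused\t4.41G\n'
    'rpool/export/home\tavailable\t43.8G\n'
)
datasets = defaultdict(dict)
for line in out.splitlines():
    dataset, prop, value = line.split('\t')
    datasets[dataset][prop] = value
for name, props in datasets.items():
    props['name'] = name
# list(datasets.values()) is the shape returned as ansible_zfs_datasets.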
- type: str - default: all - required: false -''' - -EXAMPLES = ''' -- name: Gather facts about ZFS pool rpool - community.general.zpool_facts: pool=rpool - -- name: Gather space usage about all imported ZFS pools - community.general.zpool_facts: properties='free,size' - -- name: Print gathered information - ansible.builtin.debug: - msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.' - with_items: '{{ ansible_zfs_pools }}' -''' - -RETURN = ''' -ansible_facts: - description: Dictionary containing all the detailed information about the ZFS pool facts - returned: always - type: complex - contains: - ansible_zfs_pools: - description: ZFS pool facts - returned: always - type: str - sample: - { - "allocated": "3.46G", - "altroot": "-", - "autoexpand": "off", - "autoreplace": "off", - "bootfs": "rpool/ROOT/openindiana", - "cachefile": "-", - "capacity": "6%", - "comment": "-", - "dedupditto": "0", - "dedupratio": "1.00x", - "delegation": "on", - "expandsize": "-", - "failmode": "wait", - "feature@async_destroy": "enabled", - "feature@bookmarks": "enabled", - "feature@edonr": "enabled", - "feature@embedded_data": "active", - "feature@empty_bpobj": "active", - "feature@enabled_txg": "active", - "feature@extensible_dataset": "enabled", - "feature@filesystem_limits": "enabled", - "feature@hole_birth": "active", - "feature@large_blocks": "enabled", - "feature@lz4_compress": "active", - "feature@multi_vdev_crash_dump": "enabled", - "feature@sha512": "enabled", - "feature@skein": "enabled", - "feature@spacemap_histogram": "active", - "fragmentation": "3%", - "free": "46.3G", - "freeing": "0", - "guid": "15729052870819522408", - "health": "ONLINE", - "leaked": "0", - "listsnapshots": "off", - "name": "rpool", - "readonly": "off", - "size": "49.8G", - "version": "-" - } -name: - description: ZFS pool name - returned: always - type: str - sample: rpool -parsable: - description: if parsable output should be provided in machine friendly format. 
- returned: if 'parsable' is set to True - type: bool - sample: True -''' - -from collections import defaultdict - -from ansible.module_utils.six import iteritems -from ansible.module_utils.basic import AnsibleModule - - -class ZPoolFacts(object): - def __init__(self, module): - - self.module = module - self.name = module.params['name'] - self.parsable = module.params['parsable'] - self.properties = module.params['properties'] - self._pools = defaultdict(dict) - self.facts = [] - - def pool_exists(self): - cmd = [self.module.get_bin_path('zpool'), 'list', self.name] - rc, dummy, dummy = self.module.run_command(cmd) - return rc == 0 - - def get_facts(self): - cmd = [self.module.get_bin_path('zpool'), 'get', '-H'] - if self.parsable: - cmd.append('-p') - cmd.append('-o') - cmd.append('name,property,value') - cmd.append(self.properties) - if self.name: - cmd.append(self.name) - - rc, out, err = self.module.run_command(cmd, check_rc=True) - - for line in out.splitlines(): - pool, prop, value = line.split('\t') - - self._pools[pool].update({prop: value}) - - for k, v in iteritems(self._pools): - v.update({'name': k}) - self.facts.append(v) - - return {'ansible_zfs_pools': self.facts} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=['pool', 'zpool'], type='str'), - parsable=dict(default=False, type='bool'), - properties=dict(default='all', type='str'), - ), - supports_check_mode=True - ) - - zpool_facts = ZPoolFacts(module) - - result = { - 'changed': False, - 'name': zpool_facts.name, - } - if zpool_facts.parsable: - result['parsable'] = zpool_facts.parsable - - if zpool_facts.name is not None: - if zpool_facts.pool_exists(): - result['ansible_facts'] = zpool_facts.get_facts() - else: - module.fail_json(msg='ZFS pool %s does not exist!' 
% zpool_facts.name) - else: - result['ansible_facts'] = zpool_facts.get_facts() - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/sudoers.py b/ansible_collections/community/general/plugins/modules/sudoers.py deleted file mode 120000 index 1bb579bf..00000000 --- a/ansible_collections/community/general/plugins/modules/sudoers.py +++ /dev/null @@ -1 +0,0 @@ -system/sudoers.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/supervisorctl.py b/ansible_collections/community/general/plugins/modules/supervisorctl.py deleted file mode 120000 index 8b22b64f..00000000 --- a/ansible_collections/community/general/plugins/modules/supervisorctl.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/supervisorctl.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/svc.py b/ansible_collections/community/general/plugins/modules/svc.py deleted file mode 120000 index 49e7fef4..00000000 --- a/ansible_collections/community/general/plugins/modules/svc.py +++ /dev/null @@ -1 +0,0 @@ -system/svc.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/svr4pkg.py b/ansible_collections/community/general/plugins/modules/svr4pkg.py deleted file mode 120000 index 4bc186b5..00000000 --- a/ansible_collections/community/general/plugins/modules/svr4pkg.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/svr4pkg.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/swdepot.py b/ansible_collections/community/general/plugins/modules/swdepot.py deleted file mode 120000 index c4d7ca7e..00000000 --- a/ansible_collections/community/general/plugins/modules/swdepot.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/swdepot.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/swupd.py b/ansible_collections/community/general/plugins/modules/swupd.py deleted file mode 120000 index 4a9eeacc..00000000 --- a/ansible_collections/community/general/plugins/modules/swupd.py +++ /dev/null @@ -1 +0,0 @@ -packaging/os/swupd.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/syslogger.py b/ansible_collections/community/general/plugins/modules/syslogger.py deleted file mode 120000 index 8bbf94cb..00000000 --- a/ansible_collections/community/general/plugins/modules/syslogger.py +++ /dev/null @@ -1 +0,0 @@ -notification/syslogger.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/syspatch.py b/ansible_collections/community/general/plugins/modules/syspatch.py deleted file mode 120000 index 9be12fbf..00000000 --- a/ansible_collections/community/general/plugins/modules/syspatch.py +++ /dev/null @@ -1 +0,0 @@ -system/syspatch.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/sysrc.py b/ansible_collections/community/general/plugins/modules/sysrc.py deleted file mode 120000 index 1a7959a3..00000000 --- a/ansible_collections/community/general/plugins/modules/sysrc.py +++ /dev/null @@ -1 +0,0 @@ -system/sysrc.py \ No newline at end of file diff --git a/ansible_collections/community/general/plugins/modules/system/aix_devices.py b/ansible_collections/community/general/plugins/modules/system/aix_devices.py deleted file mode 100644 index 89468059..00000000 --- 
a/ansible_collections/community/general/plugins/modules/system/aix_devices.py +++ /dev/null @@ -1,369 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, 2018 Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: -- Kairo Araujo (@kairoaraujo) -module: aix_devices -short_description: Manages AIX devices -description: -- This module discovers, defines, removes and modifies attributes of AIX devices. -options: - attributes: - description: - - A list of device attributes. - type: dict - device: - description: - - The name of the device. - - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command). - type: str - force: - description: - - Forces action. - type: bool - default: no - recursive: - description: - - Removes or defines a device and children devices. - type: bool - default: no - state: - description: - - Controls the device state. - - C(available) (alias C(present)) rescan a specific device or all devices (when C(device) is not specified). - - C(removed) (alias C(absent) removes a device. - - C(defined) changes device to Defined state. - type: str - choices: [ available, defined, removed ] - default: available -''' - -EXAMPLES = r''' -- name: Scan new devices - community.general.aix_devices: - device: all - state: available - -- name: Scan new virtual devices (vio0) - community.general.aix_devices: - device: vio0 - state: available - -- name: Removing IP alias to en0 - community.general.aix_devices: - device: en0 - attributes: - delalias4: 10.0.0.100,255.255.255.0 - -- name: Removes ent2 - community.general.aix_devices: - device: ent2 - state: removed - -- name: Put device en2 in Defined - community.general.aix_devices: - device: en2 - state: defined - -- name: Removes ent4 (inexistent). - community.general.aix_devices: - device: ent4 - state: removed - -- name: Put device en4 in Defined (inexistent) - community.general.aix_devices: - device: en4 - state: defined - -- name: Put vscsi1 and children devices in Defined state. - community.general.aix_devices: - device: vscsi1 - recursive: yes - state: defined - -- name: Removes vscsi1 and children devices. - community.general.aix_devices: - device: vscsi1 - recursive: yes - state: removed - -- name: Changes en1 mtu to 9000 and disables arp. - community.general.aix_devices: - device: en1 - attributes: - mtu: 900 - arp: off - state: available - -- name: Configure IP, netmask and set en1 up. - community.general.aix_devices: - device: en1 - attributes: - netaddr: 192.168.0.100 - netmask: 255.255.255.0 - state: up - state: available - -- name: Adding IP alias to en0 - community.general.aix_devices: - device: en0 - attributes: - alias4: 10.0.0.100,255.255.255.0 - state: available -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule - - -def _check_device(module, device): - """ - Check if device already exists and the state. - Args: - module: Ansible module. - device: device to be checked. 
- - Returns: bool, device state - - """ - lsdev_cmd = module.get_bin_path('lsdev', True) - rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device]) - - if rc != 0: - module.fail_json(msg="Failed to run lsdev", rc=rc, err=err) - - if lsdev_out: - device_state = lsdev_out.split()[1] - return True, device_state - - device_state = None - return False, device_state - - -def _check_device_attr(module, device, attr): - """ - - Args: - module: Ansible module. - device: device to check attributes. - attr: attribute to be checked. - - Returns: - - """ - lsattr_cmd = module.get_bin_path('lsattr', True) - rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr]) - - hidden_attrs = ['delalias4', 'delalias6'] - - if rc == 255: - - if attr in hidden_attrs: - current_param = '' - else: - current_param = None - - return current_param - - elif rc != 0: - module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err) - - current_param = lsattr_out.split()[1] - return current_param - - -def discover_device(module, device): - """ Discover AIX devices.""" - cfgmgr_cmd = module.get_bin_path('cfgmgr', True) - - if device is not None: - device = "-l %s" % device - - else: - device = '' - - changed = True - msg = '' - if not module.check_mode: - rc, cfgmgr_out, err = module.run_command(["%s" % cfgmgr_cmd, "%s" % device]) - changed = True - msg = cfgmgr_out - - return changed, msg - - -def change_device_attr(module, attributes, device, force): - """ Change AIX device attribute. """ - - attr_changed = [] - attr_not_changed = [] - attr_invalid = [] - chdev_cmd = module.get_bin_path('chdev', True) - - for attr in list(attributes.keys()): - new_param = attributes[attr] - current_param = _check_device_attr(module, device, attr) - - if current_param is None: - attr_invalid.append(attr) - - elif current_param != new_param: - if force: - cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr]), "%s" % force] - else: - cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr])] - - if not module.check_mode: - rc, chdev_out, err = module.run_command(cmd) - if rc != 0: - module.exit_json(msg="Failed to run chdev.", rc=rc, err=err) - - attr_changed.append(attributes[attr]) - else: - attr_not_changed.append(attributes[attr]) - - if len(attr_changed) > 0: - changed = True - attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed) - else: - changed = False - attr_changed_msg = '' - - if len(attr_not_changed) > 0: - attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed) - else: - attr_not_changed_msg = '' - - if len(attr_invalid) > 0: - attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid) - else: - attr_invalid_msg = '' - - msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg) - - return changed, msg - - -def remove_device(module, device, force, recursive, state): - """ Puts device in defined state or removes device. 
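# [Editor's sketch, not part of the diff] change_device_attr above classifies
# each requested attribute by comparing the value lsattr reports with the one
# requested: unknown attributes are invalid, differing ones are changed (one
# chdev call each), matching ones are left alone. The classification step in
# isolation, on hypothetical values:

requested = {'mtu': '9000', 'arp': 'off', 'bogus': '1'}
current = {'mtu': '1500', 'arp': 'off'}  # as read back via lsattr -El
changed, unchanged, invalid = [], [], []
for attr, value in requested.items():
    have = current.get(attr)
    if have is None:
        invalid.append(attr)
    elif have != value:
        changed.append(attr)   # would run: chdev -l <device> -a mtu=9000
    else:
        unchanged.append(attr)
# changed == ['mtu'], unchanged == ['arp'], invalid == ['bogus']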
""" - - state_opt = { - 'removed': '-d', - 'absent': '-d', - 'defined': '' - } - - recursive_opt = { - True: '-R', - False: '' - } - - recursive = recursive_opt[recursive] - state = state_opt[state] - - changed = True - msg = '' - rmdev_cmd = module.get_bin_path('rmdev', True) - - if not module.check_mode: - if state: - rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force]) - else: - rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive]) - - if rc != 0: - module.fail_json(msg="Failed to run rmdev", rc=rc, err=err) - - msg = rmdev_out - - return changed, msg - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - attributes=dict(type='dict'), - device=dict(type='str'), - force=dict(type='bool', default=False), - recursive=dict(type='bool', default=False), - state=dict(type='str', default='available', choices=['available', 'defined', 'removed']), - ), - supports_check_mode=True, - ) - - force_opt = { - True: '-f', - False: '', - } - - attributes = module.params['attributes'] - device = module.params['device'] - force = force_opt[module.params['force']] - recursive = module.params['recursive'] - state = module.params['state'] - - result = dict( - changed=False, - msg='', - ) - - if state == 'available' or state == 'present': - if attributes: - # change attributes on device - device_status, device_state = _check_device(module, device) - if device_status: - result['changed'], result['msg'] = change_device_attr(module, attributes, device, force) - else: - result['msg'] = "Device %s does not exist." % device - - else: - # discovery devices (cfgmgr) - if device and device != 'all': - device_status, device_state = _check_device(module, device) - if device_status: - # run cfgmgr on specific device - result['changed'], result['msg'] = discover_device(module, device) - - else: - result['msg'] = "Device %s does not exist." % device - - else: - result['changed'], result['msg'] = discover_device(module, device) - - elif state == 'removed' or state == 'absent' or state == 'defined': - if not device: - result['msg'] = "device is required to removed or defined state." - - else: - # Remove device - check_device, device_state = _check_device(module, device) - if check_device: - if state == 'defined' and device_state == 'Defined': - result['changed'] = False - result['msg'] = 'Device %s already in Defined' % device - - else: - result['changed'], result['msg'] = remove_device(module, device, force, recursive, state) - - else: - result['msg'] = "Device %s does not exist." % device - - else: - result['msg'] = "Unexpected state %s." 
% state - module.fail_json(**result) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py b/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py deleted file mode 100644 index a47c29f0..00000000 --- a/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py +++ /dev/null @@ -1,563 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: - - Kairo Araujo (@kairoaraujo) -module: aix_filesystem -short_description: Configure LVM and NFS file systems for AIX -description: - - This module creates, removes, mount and unmount LVM and NFS file system for - AIX using C(/etc/filesystems). - - For LVM file systems is possible to resize a file system. -options: - account_subsystem: - description: - - Specifies whether the file system is to be processed by the accounting subsystem. - type: bool - default: no - attributes: - description: - - Specifies attributes for files system separated by comma. - type: list - elements: str - default: agblksize='4096',isnapshot='no' - auto_mount: - description: - - File system is automatically mounted at system restart. - type: bool - default: yes - device: - description: - - Logical volume (LV) device name or remote export device to create a NFS file system. - - It is used to create a file system on an already existing logical volume or the exported NFS file system. - - If not mentioned a new logical volume name will be created following AIX standards (LVM). - type: str - fs_type: - description: - - Specifies the virtual file system type. - type: str - default: jfs2 - permissions: - description: - - Set file system permissions. C(rw) (read-write) or C(ro) (read-only). - type: str - choices: [ ro, rw ] - default: rw - mount_group: - description: - - Specifies the mount group. - type: str - filesystem: - description: - - Specifies the mount point, which is the directory where the file system will be mounted. - type: str - required: true - nfs_server: - description: - - Specifies a Network File System (NFS) server. - type: str - rm_mount_point: - description: - - Removes the mount point directory when used with state C(absent). - type: bool - default: no - size: - description: - - Specifies the file system size. - - For already C(present) it will be resized. - - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified - it will be in Megabytes. If the value has G specified it will be in - Gigabytes. - - If no M or G the value will be 512-byte blocks. - - If "+" is specified in begin of value, the value will be added. - - If "-" is specified in begin of value, the value will be removed. - - If "+" or "-" is not specified, the total value will be the specified. - - Size will respects the LVM AIX standards. - type: str - state: - description: - - Controls the file system state. - - C(present) check if file system exists, creates or resize. - - C(absent) removes existing file system if already C(unmounted). - - C(mounted) checks if the file system is mounted or mount the file system. - - C(unmounted) check if the file system is unmounted or unmount the file system. 
- type: str - choices: [ absent, mounted, present, unmounted ] - default: present - vg: - description: - - Specifies an existing volume group (VG). - type: str -notes: - - For more C(attributes), please check "crfs" AIX manual. -''' - -EXAMPLES = r''' -- name: Create filesystem in a previously defined logical volume. - community.general.aix_filesystem: - device: testlv - community.general.filesystem: /testfs - state: present - -- name: Creating NFS filesystem from nfshost. - community.general.aix_filesystem: - device: /home/ftp - nfs_server: nfshost - community.general.filesystem: /home/ftp - state: present - -- name: Creating a new file system without a previously logical volume. - community.general.aix_filesystem: - community.general.filesystem: /newfs - size: 1G - state: present - vg: datavg - -- name: Unmounting /testfs. - community.general.aix_filesystem: - community.general.filesystem: /testfs - state: unmounted - -- name: Resizing /mksysb to +512M. - community.general.aix_filesystem: - community.general.filesystem: /mksysb - size: +512M - state: present - -- name: Resizing /mksysb to 11G. - community.general.aix_filesystem: - community.general.filesystem: /mksysb - size: 11G - state: present - -- name: Resizing /mksysb to -2G. - community.general.aix_filesystem: - community.general.filesystem: /mksysb - size: -2G - state: present - -- name: Remove NFS filesystem /home/ftp. - community.general.aix_filesystem: - community.general.filesystem: /home/ftp - rm_mount_point: yes - state: absent - -- name: Remove /newfs. - community.general.aix_filesystem: - community.general.filesystem: /newfs - rm_mount_point: yes - state: absent -''' - -RETURN = r''' -changed: - description: Return changed for aix_filesystems actions as true or false. - returned: always - type: bool -msg: - description: Return message regarding the action. - returned: always - type: str -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._mount import ismount -import re - - -def _fs_exists(module, filesystem): - """ - Check if file system already exists on /etc/filesystems. - - :param module: Ansible module. - :param community.general.filesystem: filesystem name. - :return: True or False. - """ - lsfs_cmd = module.get_bin_path('lsfs', True) - rc, lsfs_out, err = module.run_command([lsfs_cmd, "-l", filesystem]) - if rc == 1: - if re.findall("No record matching", err): - return False - - else: - module.fail_json(msg="Failed to run lsfs. Error message: %s" % err) - - else: - - return True - - -def _check_nfs_device(module, nfs_host, device): - """ - Validate if NFS server is exporting the device (remote export). - - :param module: Ansible module. - :param nfs_host: nfs_host parameter, NFS server. - :param device: device parameter, remote export. - :return: True or False. - """ - showmount_cmd = module.get_bin_path('showmount', True) - rc, showmount_out, err = module.run_command([showmount_cmd, "-a", nfs_host]) - if rc != 0: - module.fail_json(msg="Failed to run showmount. Error message: %s" % err) - else: - showmount_data = showmount_out.splitlines() - for line in showmount_data: - if line.split(':')[1] == device: - return True - - return False - - -def _validate_vg(module, vg): - """ - Check the current state of volume group. - - :param module: Ansible module argument spec. - :param vg: Volume Group name. - :return: True (VG in varyon state) or False (VG in varyoff state) or - None (VG does not exist), message. 
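# [Editor's sketch, not part of the diff] The size option documented above is
# passed through to chfs/crfs: a leading '+' grows the file system, a leading
# '-' shrinks it, otherwise the value is the absolute target, with an M or G
# suffix selecting megabytes or gigabytes and a bare number meaning 512-byte
# blocks. A hypothetical decoder of that convention (not part of the module):

def describe_size(value):
    sign = value[0] if value[0] in '+-' else ''
    magnitude = value[1:] if sign else value
    if magnitude[-1] in 'MG':
        count = magnitude[:-1]
        unit = {'M': 'megabytes', 'G': 'gigabytes'}[magnitude[-1]]
    else:
        count, unit = magnitude, '512-byte blocks'
    action = {'+': 'grow by', '-': 'shrink by', '': 'resize to'}[sign]
    return '%s %s %s' % (action, count, unit)

# describe_size('+512M') -> 'grow by 512 megabytes'
# describe_size('-2G')   -> 'shrink by 2 gigabytes'
# describe_size('11G')   -> 'resize to 11 gigabytes'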
- """ - lsvg_cmd = module.get_bin_path('lsvg', True) - rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) - if rc != 0: - module.fail_json(msg="Failed executing %s command." % lsvg_cmd) - - rc, current_all_vgs, err = module.run_command([lsvg_cmd, "%s"]) - if rc != 0: - module.fail_json(msg="Failed executing %s command." % lsvg_cmd) - - if vg in current_all_vgs and vg not in current_active_vgs: - msg = "Volume group %s is in varyoff state." % vg - return False, msg - elif vg in current_active_vgs: - msg = "Volume group %s is in varyon state." % vg - return True, msg - else: - msg = "Volume group %s does not exist." % vg - return None, msg - - -def resize_fs(module, filesystem, size): - """ Resize LVM file system. """ - - chfs_cmd = module.get_bin_path('chfs', True) - if not module.check_mode: - rc, chfs_out, err = module.run_command([chfs_cmd, "-a", "size=%s" % size, filesystem]) - - if rc == 28: - changed = False - return changed, chfs_out - elif rc != 0: - if re.findall('Maximum allocation for logical', err): - changed = False - return changed, err - else: - module.fail_json(msg="Failed to run chfs. Error message: %s" % err) - - else: - if re.findall('The filesystem size is already', chfs_out): - changed = False - else: - changed = True - - return changed, chfs_out - else: - changed = True - msg = '' - - return changed, msg - - -def create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, - account_subsystem, permissions, nfs_server, attributes): - """ Create LVM file system or NFS remote mount point. """ - - attributes = ' -a '.join(attributes) - - # Parameters definition. - account_subsys_opt = { - True: '-t yes', - False: '-t no' - } - - if nfs_server is not None: - auto_mount_opt = { - True: '-A', - False: '-a' - } - - else: - auto_mount_opt = { - True: '-A yes', - False: '-A no' - } - - if size is None: - size = '' - else: - size = "-a size=%s" % size - - if device is None: - device = '' - else: - device = "-d %s" % device - - if vg is None: - vg = '' - else: - vg_state, msg = _validate_vg(module, vg) - if vg_state: - vg = "-g %s" % vg - else: - changed = False - - return changed, msg - - if mount_group is None: - mount_group = '' - - else: - mount_group = "-u %s" % mount_group - - auto_mount = auto_mount_opt[auto_mount] - account_subsystem = account_subsys_opt[account_subsystem] - - if nfs_server is not None: - # Creates a NFS file system. - mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True) - if not module.check_mode: - rc, mknfsmnt_out, err = module.run_command([mknfsmnt_cmd, "-f", filesystem, device, "-h", nfs_server, "-t", permissions, auto_mount, "-w", "bg"]) - if rc != 0: - module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err) - else: - changed = True - msg = "NFS file system %s created." % filesystem - - return changed, msg - else: - changed = True - msg = '' - - return changed, msg - - else: - # Creates a LVM file system. - crfs_cmd = module.get_bin_path('crfs', True) - if not module.check_mode: - cmd = [crfs_cmd, "-v", fs_type, "-m", filesystem, vg, device, mount_group, auto_mount, account_subsystem, "-p", permissions, size, "-a", attributes] - rc, crfs_out, err = module.run_command(cmd) - - if rc == 10: - module.exit_json( - msg="Using a existent previously defined logical volume, " - "volume group needs to be empty. %s" % err) - - elif rc != 0: - module.fail_json(msg="Failed to run %s. 
Error message: %s" % (cmd, err)) - - else: - changed = True - return changed, crfs_out - else: - changed = True - msg = '' - - return changed, msg - - -def remove_fs(module, filesystem, rm_mount_point): - """ Remove an LVM file system or NFS entry. """ - - # Command parameters. - rm_mount_point_opt = { - True: '-r', - False: '' - } - - rm_mount_point = rm_mount_point_opt[rm_mount_point] - - rmfs_cmd = module.get_bin_path('rmfs', True) - if not module.check_mode: - cmd = [rmfs_cmd, "-r", rm_mount_point, filesystem] - rc, rmfs_out, err = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) - else: - changed = True - msg = rmfs_out - if not rmfs_out: - msg = "File system %s removed." % filesystem - - return changed, msg - else: - changed = True - msg = '' - - return changed, msg - - -def mount_fs(module, filesystem): - """ Mount a file system. """ - mount_cmd = module.get_bin_path('mount', True) - - if not module.check_mode: - rc, mount_out, err = module.run_command([mount_cmd, filesystem]) - if rc != 0: - module.fail_json(msg="Failed to run mount. Error message: %s" % err) - else: - changed = True - msg = "File system %s mounted." % filesystem - - return changed, msg - else: - changed = True - msg = '' - - return changed, msg - - -def unmount_fs(module, filesystem): - """ Unmount a file system.""" - unmount_cmd = module.get_bin_path('unmount', True) - - if not module.check_mode: - rc, unmount_out, err = module.run_command([unmount_cmd, filesystem]) - if rc != 0: - module.fail_json(msg="Failed to run unmount. Error message: %s" % err) - else: - changed = True - msg = "File system %s unmounted." % filesystem - - return changed, msg - else: - changed = True - msg = '' - - return changed, msg - - -def main(): - module = AnsibleModule( - argument_spec=dict( - account_subsystem=dict(type='bool', default=False), - attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]), - auto_mount=dict(type='bool', default=True), - device=dict(type='str'), - filesystem=dict(type='str', required=True), - fs_type=dict(type='str', default='jfs2'), - permissions=dict(type='str', default='rw', choices=['rw', 'ro']), - mount_group=dict(type='str'), - nfs_server=dict(type='str'), - rm_mount_point=dict(type='bool', default=False), - size=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']), - vg=dict(type='str'), - ), - supports_check_mode=True, - ) - - account_subsystem = module.params['account_subsystem'] - attributes = module.params['attributes'] - auto_mount = module.params['auto_mount'] - device = module.params['device'] - fs_type = module.params['fs_type'] - permissions = module.params['permissions'] - mount_group = module.params['mount_group'] - filesystem = module.params['filesystem'] - nfs_server = module.params['nfs_server'] - rm_mount_point = module.params['rm_mount_point'] - size = module.params['size'] - state = module.params['state'] - vg = module.params['vg'] - - result = dict( - changed=False, - msg='', - ) - - if state == 'present': - fs_mounted = ismount(filesystem) - fs_exists = _fs_exists(module, filesystem) - - # Check if fs is mounted or exists. - if fs_mounted or fs_exists: - result['msg'] = "File system %s already exists." % filesystem - result['changed'] = False - - # If parameter size was passed, resize fs. 
- if size is not None: - result['changed'], result['msg'] = resize_fs(module, filesystem, size) - - # If fs doesn't exist, create it. - else: - # Check if fs will be a NFS device. - if nfs_server is not None: - if device is None: - result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.' - module.fail_json(**result) - else: - # Create a fs from NFS export. - if _check_nfs_device(module, nfs_server, device): - result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) - - if device is None: - if vg is None: - result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.' - module.fail_json(**result) - else: - # Create a fs from - result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) - - if device is not None and nfs_server is None: - # Create a fs from a previously lv device. - result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) - - elif state == 'absent': - if ismount(filesystem): - result['msg'] = "File system %s mounted." % filesystem - - else: - fs_status = _fs_exists(module, filesystem) - if not fs_status: - result['msg'] = "File system %s does not exist." % filesystem - else: - result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point) - - elif state == 'mounted': - if ismount(filesystem): - result['changed'] = False - result['msg'] = "File system %s already mounted." % filesystem - else: - result['changed'], result['msg'] = mount_fs(module, filesystem) - - elif state == 'unmounted': - if not ismount(filesystem): - result['changed'] = False - result['msg'] = "File system %s already unmounted." % filesystem - else: - result['changed'], result['msg'] = unmount_fs(module, filesystem) - - else: - # Unreachable codeblock - result['msg'] = "Unexpected state %s." % state - module.fail_json(**result) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/aix_inittab.py b/ansible_collections/community/general/plugins/modules/system/aix_inittab.py deleted file mode 100644 index c2daface..00000000 --- a/ansible_collections/community/general/plugins/modules/system/aix_inittab.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Joris Weijters -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -author: -- Joris Weijters (@molekuul) -module: aix_inittab -short_description: Manages the inittab on AIX -description: - - Manages the inittab on AIX. -options: - name: - description: - - Name of the inittab entry. - type: str - required: yes - aliases: [ service ] - runlevel: - description: - - Runlevel of the entry. - type: str - required: yes - action: - description: - - Action what the init has to do with this entry. - type: str - choices: - - boot - - bootwait - - hold - - initdefault - - 'off' - - once - - ondemand - - powerfail - - powerwait - - respawn - - sysinit - - wait - command: - description: - - What command has to run. 
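-    - For example C(echo hello), as used in the examples below.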
- type: str - required: yes - insertafter: - description: - - After which inittabline should the new entry inserted. - type: str - state: - description: - - Whether the entry should be present or absent in the inittab file. - type: str - choices: [ absent, present ] - default: present -notes: - - The changes are persistent across reboots. - - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands. - - Tested on AIX 7.1. -requirements: -- itertools -''' - -EXAMPLES = ''' -# Add service startmyservice to the inittab, directly after service existingservice. -- name: Add startmyservice to inittab - community.general.aix_inittab: - name: startmyservice - runlevel: 4 - action: once - command: echo hello - insertafter: existingservice - state: present - become: yes - -# Change inittab entry startmyservice to runlevel "2" and processaction "wait". -- name: Change startmyservice to inittab - community.general.aix_inittab: - name: startmyservice - runlevel: 2 - action: wait - command: echo hello - state: present - become: yes - -- name: Remove startmyservice from inittab - community.general.aix_inittab: - name: startmyservice - runlevel: 2 - action: wait - command: echo hello - state: absent - become: yes -''' - -RETURN = ''' -name: - description: Name of the adjusted inittab entry - returned: always - type: str - sample: startmyservice -msg: - description: Action done with the inittab entry - returned: changed - type: str - sample: changed inittab entry startmyservice -changed: - description: Whether the inittab changed or not - returned: always - type: bool - sample: true -''' - -# Import necessary libraries -try: - # python 2 - from itertools import izip -except ImportError: - izip = zip - -from ansible.module_utils.basic import AnsibleModule - -# end import modules -# start defining the functions - - -def check_current_entry(module): - # Check if entry exists, if not return False in exists in return dict, - # if true return True and the entry in return dict - existsdict = {'exist': False} - lsitab = module.get_bin_path('lsitab') - (rc, out, err) = module.run_command([lsitab, module.params['name']]) - if rc == 0: - keys = ('name', 'runlevel', 'action', 'command') - values = out.split(":") - # strip non readable characters as \n - values = map(lambda s: s.strip(), values) - existsdict = dict(izip(keys, values)) - existsdict.update({'exist': True}) - return existsdict - - -def main(): - # initialize - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True, aliases=['service']), - runlevel=dict(type='str', required=True), - action=dict(type='str', choices=[ - 'boot', - 'bootwait', - 'hold', - 'initdefault', - 'off', - 'once', - 'ondemand', - 'powerfail', - 'powerwait', - 'respawn', - 'sysinit', - 'wait', - ]), - command=dict(type='str', required=True), - insertafter=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - supports_check_mode=True, - ) - - result = { - 'name': module.params['name'], - 'changed': False, - 'msg': "" - } - - # Find commandline strings - mkitab = module.get_bin_path('mkitab') - rmitab = module.get_bin_path('rmitab') - chitab = module.get_bin_path('chitab') - rc = 0 - - # check if the new entry exists - current_entry = check_current_entry(module) - - # if action is install or change, - if module.params['state'] == 'present': - - # create new entry string - new_entry = module.params['name'] + ":" + module.params['runlevel'] + \ - ":" + 
module.params['action'] + ":" + module.params['command']
-
-    # If the entry does not exist, or any of its fields differ, the entry
-    # will be created or changed accordingly.
-    if (not current_entry['exist']) or (
-            module.params['runlevel'] != current_entry['runlevel'] or
-            module.params['action'] != current_entry['action'] or
-            module.params['command'] != current_entry['command']):
-
-        # If the entry does exist then change the entry
-        if current_entry['exist']:
-            if not module.check_mode:
-                (rc, out, err) = module.run_command([chitab, new_entry])
-            if rc != 0:
-                module.fail_json(
-                    msg="could not change inittab", rc=rc, err=err)
-            result['msg'] = "changed inittab entry" + " " + current_entry['name']
-            result['changed'] = True
-
-        # If the entry does not exist create the entry
-        elif not current_entry['exist']:
-            if module.params['insertafter']:
-                if not module.check_mode:
-                    (rc, out, err) = module.run_command(
-                        [mkitab, '-i', module.params['insertafter'], new_entry])
-            else:
-                if not module.check_mode:
-                    (rc, out, err) = module.run_command(
-                        [mkitab, new_entry])
-
-            if rc != 0:
-                module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
-            result['msg'] = "added inittab entry" + " " + module.params['name']
-            result['changed'] = True
-
-    elif module.params['state'] == 'absent':
-        # If the action is remove and the entry exists then remove the entry
-        if current_entry['exist']:
-            if not module.check_mode:
-                (rc, out, err) = module.run_command(
-                    [rmitab, module.params['name']])
-                if rc != 0:
-                    module.fail_json(
-                        msg="could not remove entry from inittab", rc=rc, err=err)
-            result['msg'] = "removed inittab entry" + " " + current_entry['name']
-            result['changed'] = True
-
-    module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/system/aix_lvg.py b/ansible_collections/community/general/plugins/modules/system/aix_lvg.py
deleted file mode 100644
index 37bf71a4..00000000
--- a/ansible_collections/community/general/plugins/modules/system/aix_lvg.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Kairo Araujo
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-author:
-- Kairo Araujo (@kairoaraujo)
-module: aix_lvg
-short_description: Manage LVM volume groups on AIX
-description:
-- This module creates, removes or resizes volume groups on AIX LVM.
-options:
-  force:
-    description:
-    - Force volume group creation.
-    type: bool
-    default: no
-  pp_size:
-    description:
-    - The size of the physical partition in megabytes.
-    type: int
-  pvs:
-    description:
-    - List of comma-separated devices to use as physical devices in this volume group.
-    - Required when creating or extending (C(present) state) the volume group.
-    - If not provided when reducing (C(absent) state), the whole volume group will be removed.
-    type: list
-    elements: str
-  state:
-    description:
-    - Controls whether the volume group exists, and the volume group AIX state varyonvg C(varyon) or varyoffvg C(varyoff).
-    type: str
-    choices: [ absent, present, varyoff, varyon ]
-    default: present
-  vg:
-    description:
-    - The name of the volume group.
-    type: str
-    required: true
-  vg_type:
-    description:
-    - The type of the volume group.
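-    - C(big) and C(scalable) map to the mkvg C(-B) and C(-S) flags respectively.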
- type: str - choices: [ big, normal, scalable ] - default: normal -notes: -- AIX will permit remove VG only if all LV/Filesystems are not busy. -- Module does not modify PP size for already present volume group. -''' - -EXAMPLES = r''' -- name: Create a volume group datavg - community.general.aix_lvg: - vg: datavg - pp_size: 128 - vg_type: scalable - state: present - -- name: Removing a volume group datavg - community.general.aix_lvg: - vg: datavg - state: absent - -- name: Extending rootvg - community.general.aix_lvg: - vg: rootvg - pvs: hdisk1 - state: present - -- name: Reducing rootvg - community.general.aix_lvg: - vg: rootvg - pvs: hdisk1 - state: absent -''' - -RETURN = r''' # ''' - -from ansible.module_utils.basic import AnsibleModule - - -def _validate_pv(module, vg, pvs): - """ - Function to validate if the physical volume (PV) is not already in use by - another volume group or Oracle ASM. - - :param module: Ansible module argument spec. - :param vg: Volume group name. - :param pvs: Physical volume list. - :return: [bool, message] or module.fail_json for errors. - """ - - lspv_cmd = module.get_bin_path('lspv', True) - rc, current_lspv, stderr = module.run_command([lspv_cmd]) - if rc != 0: - module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr) - - for pv in pvs: - # Get pv list. - lspv_list = {} - for line in current_lspv.splitlines(): - pv_data = line.split() - lspv_list[pv_data[0]] = pv_data[2] - - # Check if pv exists and is free. - if pv not in lspv_list.keys(): - module.fail_json(msg="Physical volume '%s' doesn't exist." % pv) - - if lspv_list[pv] == 'None': - # Disk None, looks free. - # Check if PV is not already in use by Oracle ASM. - lquerypv_cmd = module.get_bin_path('lquerypv', True) - rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", "/dev/%s" % pv, "20", "10"]) - if rc != 0: - module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr) - - if 'ORCLDISK' in current_lquerypv: - module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv) - - msg = "Physical volume '%s' is ok to be used." % pv - return True, msg - - # Check if PV is already in use for the same vg. - elif vg != lspv_list[pv]: - module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv])) - - msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv]) - return False, msg - - -def _validate_vg(module, vg): - """ - Check the current state of volume group. - - :param module: Ansible module argument spec. - :param vg: Volume Group name. - :return: True (VG in varyon state) or False (VG in varyoff state) or - None (VG does not exist), message. - """ - lsvg_cmd = module.get_bin_path('lsvg', True) - rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) - if rc != 0: - module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) - - rc, current_all_vgs, err = module.run_command([lsvg_cmd]) - if rc != 0: - module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) - - if vg in current_all_vgs and vg not in current_active_vgs: - msg = "Volume group '%s' is in varyoff state." % vg - return False, msg - - if vg in current_active_vgs: - msg = "Volume group '%s' is in varyon state." % vg - return True, msg - - msg = "Volume group '%s' does not exist." 
% vg
-    return None, msg
-
-
-def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
-    """ Creates or extends a volume group. """
-
-    # Command option parameters.
-    force_opt = {
-        True: '-f',
-        False: ''
-    }
-
-    vg_opt = {
-        'normal': '',
-        'big': '-B',
-        'scalable': '-S',
-    }
-
-    # Validate that the PVs are not already in use.
-    pv_state, msg = _validate_pv(module, vg, pvs)
-    if not pv_state:
-        changed = False
-        return changed, msg
-
-    vg_state, msg = vg_validation
-    if vg_state is False:
-        changed = False
-        return changed, msg
-
-    elif vg_state is True:
-        # Volume group extension.
-        changed = True
-        msg = ""
-
-        if not module.check_mode:
-            extendvg_cmd = module.get_bin_path('extendvg', True)
-            rc, output, err = module.run_command([extendvg_cmd, vg] + pvs)
-            if rc != 0:
-                changed = False
-                msg = "Extending volume group '%s' has failed." % vg
-                return changed, msg
-
-        msg = "Volume group '%s' extended." % vg
-        return changed, msg
-
-    elif vg_state is None:
-        # Volume group creation.
-        changed = True
-        msg = ''
-
-        if not module.check_mode:
-            mkvg_cmd = module.get_bin_path('mkvg', True)
-            rc, output, err = module.run_command([mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], "-y", vg] + pvs)
-            if rc != 0:
-                changed = False
-                msg = "Creating volume group '%s' failed." % vg
-                return changed, msg
-
-        msg = "Volume group '%s' created." % vg
-        return changed, msg
-
-
-def reduce_vg(module, vg, pvs, vg_validation):
-    vg_state, msg = vg_validation
-
-    if vg_state is False:
-        changed = False
-        return changed, msg
-
-    elif vg_state is None:
-        changed = False
-        return changed, msg
-
-    # Define pvs_to_remove (list of physical volumes to be removed).
-    if pvs is None:
-        # Remove the VG if no PVs were provided.
-        # Remark: AIX will only permit the removal if the VG contains no LVs.
-        lsvg_cmd = module.get_bin_path('lsvg', True)
-        rc, current_pvs, err = module.run_command([lsvg_cmd, "-p", vg])
-        if rc != 0:
-            module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
-
-        pvs_to_remove = []
-        for line in current_pvs.splitlines()[2:]:
-            pvs_to_remove.append(line.split()[0])
-
-        reduce_msg = "Volume group '%s' removed." % vg
-    else:
-        pvs_to_remove = pvs
-        reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))
-
-    # Reduce volume group.
-    if len(pvs_to_remove) <= 0:
-        changed = False
-        msg = "No physical volumes to remove."
-        return changed, msg
-
-    changed = True
-    msg = ''
-
-    if not module.check_mode:
-        reducevg_cmd = module.get_bin_path('reducevg', True)
-        rc, stdout, stderr = module.run_command([reducevg_cmd, "-df", vg] + pvs_to_remove)
-        if rc != 0:
-            module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)

-    msg = reduce_msg
-    return changed, msg
-
-
-def state_vg(module, vg, state, vg_validation):
-    vg_state, msg = vg_validation
-
-    if vg_state is None:
-        module.fail_json(msg=msg)
-
-    if state == 'varyon':
-        if vg_state is True:
-            changed = False
-            return changed, msg
-
-        changed = True
-        msg = ''
-        if not module.check_mode:
-            varyonvg_cmd = module.get_bin_path('varyonvg', True)
-            rc, varyonvg_out, err = module.run_command([varyonvg_cmd, vg])
-            if rc != 0:
-                module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
-
-        msg = "Varyon volume group %s completed." 
% vg - return changed, msg - - elif state == 'varyoff': - if vg_state is False: - changed = False - return changed, msg - - changed = True - msg = '' - - if not module.check_mode: - varyonvg_cmd = module.get_bin_path('varyoffvg', True) - rc, varyonvg_out, stderr = module.run_command([varyonvg_cmd, vg]) - if rc != 0: - module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr) - - msg = "Varyoff volume group %s completed." % vg - return changed, msg - - -def main(): - module = AnsibleModule( - argument_spec=dict( - force=dict(type='bool', default=False), - pp_size=dict(type='int'), - pvs=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']), - vg=dict(type='str', required=True), - vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable']) - ), - supports_check_mode=True, - ) - - force = module.params['force'] - pp_size = module.params['pp_size'] - pvs = module.params['pvs'] - state = module.params['state'] - vg = module.params['vg'] - vg_type = module.params['vg_type'] - - if pp_size is None: - pp_size = '' - else: - pp_size = "-s %s" % pp_size - - vg_validation = _validate_vg(module, vg) - - if state == 'present': - if not pvs: - changed = False - msg = "pvs is required to state 'present'." - module.fail_json(msg=msg) - else: - changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation) - - elif state == 'absent': - changed, msg = reduce_vg(module, vg, pvs, vg_validation) - - elif state == 'varyon' or state == 'varyoff': - changed, msg = state_vg(module, vg, state, vg_validation) - - else: - changed = False - msg = "Unexpected state" - - module.exit_json(changed=changed, msg=msg, state=state) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/aix_lvol.py b/ansible_collections/community/general/plugins/modules/system/aix_lvol.py deleted file mode 100644 index 02b4f06c..00000000 --- a/ansible_collections/community/general/plugins/modules/system/aix_lvol.py +++ /dev/null @@ -1,337 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Alain Dejoux -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -author: - - Alain Dejoux (@adejoux) -module: aix_lvol -short_description: Configure AIX LVM logical volumes -description: - - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module. -options: - vg: - description: - - The volume group this logical volume is part of. - type: str - required: true - lv: - description: - - The name of the logical volume. - type: str - required: true - lv_type: - description: - - The type of the logical volume. - type: str - default: jfs2 - size: - description: - - The size of the logical volume with one of the [MGT] units. - type: str - copies: - description: - - The number of copies of the logical volume. - - Maximum copies are 3. - type: int - default: 1 - policy: - description: - - Sets the interphysical volume allocation policy. - - C(maximum) allocates logical partitions across the maximum number of physical volumes. - - C(minimum) allocates logical partitions across the minimum number of physical volumes. 
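-      - Internally this maps to the mklv/chlv C(-e x) (maximum) or C(-e m) (minimum) flag.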
- type: str - choices: [ maximum, minimum ] - default: maximum - state: - description: - - Control if the logical volume exists. If C(present) and the - volume does not already exist then the C(size) option is required. - type: str - choices: [ absent, present ] - default: present - opts: - description: - - Free-form options to be passed to the mklv command. - type: str - pvs: - description: - - A list of physical volumes e.g. C(hdisk1,hdisk2). - type: list - elements: str -''' - -EXAMPLES = r''' -- name: Create a logical volume of 512M - community.general.aix_lvol: - vg: testvg - lv: testlv - size: 512M - -- name: Create a logical volume of 512M with disks hdisk1 and hdisk2 - community.general.aix_lvol: - vg: testvg - lv: test2lv - size: 512M - pvs: [ hdisk1, hdisk2 ] - -- name: Create a logical volume of 512M mirrored - community.general.aix_lvol: - vg: testvg - lv: test3lv - size: 512M - copies: 2 - -- name: Create a logical volume of 1G with a minimum placement policy - community.general.aix_lvol: - vg: rootvg - lv: test4lv - size: 1G - policy: minimum - -- name: Create a logical volume with special options like mirror pool - community.general.aix_lvol: - vg: testvg - lv: testlv - size: 512M - opts: -p copy1=poolA -p copy2=poolB - -- name: Extend the logical volume to 1200M - community.general.aix_lvol: - vg: testvg - lv: test4lv - size: 1200M - -- name: Remove the logical volume - community.general.aix_lvol: - vg: testvg - lv: testlv - state: absent -''' - -RETURN = r''' -msg: - type: str - description: A friendly message describing the task result. - returned: always - sample: Logical volume testlv created. -''' - -import re - -from ansible.module_utils.basic import AnsibleModule - - -def convert_size(module, size): - unit = size[-1].upper() - units = ['M', 'G', 'T'] - try: - multiplier = 1024 ** units.index(unit) - except ValueError: - module.fail_json(msg="No valid size unit specified.") - - return int(size[:-1]) * multiplier - - -def round_ppsize(x, base=16): - new_size = int(base * round(float(x) / base)) - if new_size < x: - new_size += base - return new_size - - -def parse_lv(data): - name = None - - for line in data.splitlines(): - match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line) - if match is not None: - name = match.group(1) - vg = match.group(2) - continue - match = re.search(r"LPs:\s+(\d+).*PPs", line) - if match is not None: - lps = int(match.group(1)) - continue - match = re.search(r"PP SIZE:\s+(\d+)", line) - if match is not None: - pp_size = int(match.group(1)) - continue - match = re.search(r"INTER-POLICY:\s+(\w+)", line) - if match is not None: - policy = match.group(1) - continue - - if not name: - return None - - size = lps * pp_size - - return {'name': name, 'vg': vg, 'size': size, 'policy': policy} - - -def parse_vg(data): - - for line in data.splitlines(): - - match = re.search(r"VOLUME GROUP:\s+(\w+)", line) - if match is not None: - name = match.group(1) - continue - - match = re.search(r"TOTAL PP.*\((\d+)", line) - if match is not None: - size = int(match.group(1)) - continue - - match = re.search(r"PP SIZE:\s+(\d+)", line) - if match is not None: - pp_size = int(match.group(1)) - continue - - match = re.search(r"FREE PP.*\((\d+)", line) - if match is not None: - free = int(match.group(1)) - continue - - return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - vg=dict(type='str', required=True), - lv=dict(type='str', required=True), - 
lv_type=dict(type='str', default='jfs2'), - size=dict(type='str'), - opts=dict(type='str', default=''), - copies=dict(type='int', default=1), - state=dict(type='str', default='present', choices=['absent', 'present']), - policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']), - pvs=dict(type='list', elements='str', default=list()) - ), - supports_check_mode=True, - ) - - vg = module.params['vg'] - lv = module.params['lv'] - lv_type = module.params['lv_type'] - size = module.params['size'] - opts = module.params['opts'] - copies = module.params['copies'] - policy = module.params['policy'] - state = module.params['state'] - pvs = module.params['pvs'] - - pv_list = ' '.join(pvs) - - if policy == 'maximum': - lv_policy = 'x' - else: - lv_policy = 'm' - - # Add echo command when running in check-mode - if module.check_mode: - test_opt = 'echo ' - else: - test_opt = '' - - # check if system commands are available - lsvg_cmd = module.get_bin_path("lsvg", required=True) - lslv_cmd = module.get_bin_path("lslv", required=True) - - # Get information on volume group requested - rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg)) - - if rc != 0: - if state == 'absent': - module.exit_json(changed=False, msg="Volume group %s does not exist." % vg) - else: - module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err) - - this_vg = parse_vg(vg_info) - - if size is not None: - # Calculate pp size and round it up based on pp size. - lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size']) - - # Get information on logical volume requested - rc, lv_info, err = module.run_command( - "%s %s" % (lslv_cmd, lv)) - - if rc != 0: - if state == 'absent': - module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv) - - changed = False - - this_lv = parse_lv(lv_info) - - if state == 'present' and not size: - if this_lv is None: - module.fail_json(msg="No size given.") - - if this_lv is None: - if state == 'present': - if lv_size > this_vg['free']: - module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free'])) - - # create LV - mklv_cmd = module.get_bin_path("mklv", required=True) - - cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list) - rc, out, err = module.run_command(cmd) - if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s created." % lv) - else: - module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err) - else: - if state == 'absent': - # remove LV - rmlv_cmd = module.get_bin_path("rmlv", required=True) - rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name'])) - if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s deleted." % lv) - else: - module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err) - else: - if this_lv['policy'] != policy: - # change lv allocation policy - chlv_cmd = module.get_bin_path("chlv", required=True) - rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name'])) - if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy)) - else: - module.fail_json(msg="Failed to change logical volume %s policy." 
% lv, rc=rc, out=out, err=err)
-
-        if vg != this_lv['vg']:
-            module.fail_json(msg="Logical volume %s already exists in volume group %s." % (lv, this_lv['vg']))
-
-        # From here on the only remaining action is resizing; if no size
-        # parameter was passed, we do nothing.
-        if not size:
-            module.exit_json(changed=False, msg="Logical volume %s already exists." % lv)
-
-        # resize LV based on absolute values
-        if int(lv_size) > this_lv['size']:
-            extendlv_cmd = module.get_bin_path("extendlv", required=True)
-            cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
-            rc, out, err = module.run_command(cmd)
-            if rc == 0:
-                module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
-            else:
-                module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
-        elif lv_size < this_lv['size']:
-            module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
-        else:
-            module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/system/alternatives.py b/ansible_collections/community/general/plugins/modules/system/alternatives.py
deleted file mode 100644
index fb4c05e1..00000000
--- a/ansible_collections/community/general/plugins/modules/system/alternatives.py
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Gabe Mulley
-# Copyright: (c) 2015, David Wittman
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: alternatives
-short_description: Manages alternative programs for common commands
-description:
-    - Manages symbolic links using the 'update-alternatives' tool.
-    - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
-author:
-    - David Wittman (@DavidWittman)
-    - Gabe Mulley (@mulby)
-options:
-  name:
-    description:
-      - The generic name of the link.
-    type: str
-    required: true
-  path:
-    description:
-      - The path to the real executable that the link should point to.
-    type: path
-    required: true
-  link:
-    description:
-      - The path to the symbolic link that should point to the real executable.
-      - This option is always required on RHEL-based distributions. On Debian-based distributions this option is
-        required when the alternative I(name) is unknown to the system.
-    type: path
-  priority:
-    description:
-      - The priority of the alternative.
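-      - The value is passed to C(update-alternatives --install); in automatic mode the highest priority wins.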
- type: int - default: 50 -requirements: [ update-alternatives ] -''' - -EXAMPLES = r''' -- name: Correct java version selected - community.general.alternatives: - name: java - path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - -- name: Alternatives link created - community.general.alternatives: - name: hadoop-conf - link: /etc/hadoop/conf - path: /etc/hadoop/conf.ansible - -- name: Make java 32 bit an alternative with low priority - community.general.alternatives: - name: java - path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java - priority: -10 -''' - -import os -import re -import subprocess - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - path=dict(type='path', required=True), - link=dict(type='path'), - priority=dict(type='int', default=50), - ), - supports_check_mode=True, - ) - - params = module.params - name = params['name'] - path = params['path'] - link = params['link'] - priority = params['priority'] - - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True) - - current_path = None - all_alternatives = [] - - # Run `update-alternatives --display ` to find existing alternatives - (rc, display_output, dummy) = module.run_command( - ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] - ) - - if rc == 0: - # Alternatives already exist for this link group - # Parse the output to determine the current path of the symlink and - # available alternatives - current_path_regex = re.compile(r'^\s*link currently points to (.*)$', - re.MULTILINE) - alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority', re.MULTILINE) - - match = current_path_regex.search(display_output) - if match: - current_path = match.group(1) - all_alternatives = alternative_regex.findall(display_output) - - if not link: - # Read the current symlink target from `update-alternatives --query` - # in case we need to install the new alternative before setting it. 
- # - # This is only compatible on Debian-based systems, as the other - # alternatives don't have --query available - rc, query_output, dummy = module.run_command( - ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] - ) - if rc == 0: - for line in query_output.splitlines(): - if line.startswith('Link:'): - link = line.split()[1] - break - - if current_path != path: - if module.check_mode: - module.exit_json(changed=True, current_path=current_path) - try: - # install the requested path if necessary - if path not in all_alternatives: - if not os.path.exists(path): - module.fail_json(msg="Specified path %s does not exist" % path) - if not link: - module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link") - - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)], - check_rc=True - ) - - # select the requested path - module.run_command( - [UPDATE_ALTERNATIVES, '--set', name, path], - check_rc=True - ) - - module.exit_json(changed=True) - except subprocess.CalledProcessError as cpe: - module.fail_json(msg=str(dir(cpe))) - else: - module.exit_json(changed=False) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/awall.py b/ansible_collections/community/general/plugins/modules/system/awall.py deleted file mode 100644 index 260c7ae4..00000000 --- a/ansible_collections/community/general/plugins/modules/system/awall.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ted Trask -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: awall -short_description: Manage awall policies -author: Ted Trask (@tdtrask) -description: - - This modules allows for enable/disable/activate of I(awall) policies. - - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files - and activates the configuration on the system. -options: - name: - description: - - One or more policy names. - type: list - elements: str - state: - description: - - Whether the policies should be enabled or disabled. - type: str - choices: [ disabled, enabled ] - default: enabled - activate: - description: - - Activate the new firewall rules. - - Can be run with other steps or on its own. 
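-      - Under the hood this runs C(awall activate --force).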
- type: bool - default: no -''' - -EXAMPLES = r''' -- name: Enable "foo" and "bar" policy - community.general.awall: - name: [ foo bar ] - state: enabled - -- name: Disable "foo" and "bar" policy and activate new rules - community.general.awall: - name: - - foo - - bar - state: disabled - activate: no - -- name: Activate currently enabled firewall rules - community.general.awall: - activate: yes -''' - -RETURN = ''' # ''' - -import re -from ansible.module_utils.basic import AnsibleModule - - -def activate(module): - cmd = "%s activate --force" % (AWALL_PATH) - rc, stdout, stderr = module.run_command(cmd) - if rc == 0: - return True - else: - module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr) - - -def is_policy_enabled(module, name): - cmd = "%s list" % (AWALL_PATH) - rc, stdout, stderr = module.run_command(cmd) - if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE): - return True - return False - - -def enable_policy(module, names, act): - policies = [] - for name in names: - if not is_policy_enabled(module, name): - policies.append(name) - if not policies: - module.exit_json(changed=False, msg="policy(ies) already enabled") - names = " ".join(policies) - if module.check_mode: - cmd = "%s list" % (AWALL_PATH) - else: - cmd = "%s enable %s" % (AWALL_PATH, names) - rc, stdout, stderr = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr) - if act and not module.check_mode: - activate(module) - module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names) - - -def disable_policy(module, names, act): - policies = [] - for name in names: - if is_policy_enabled(module, name): - policies.append(name) - if not policies: - module.exit_json(changed=False, msg="policy(ies) already disabled") - names = " ".join(policies) - if module.check_mode: - cmd = "%s list" % (AWALL_PATH) - else: - cmd = "%s disable %s" % (AWALL_PATH, names) - rc, stdout, stderr = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr) - if act and not module.check_mode: - activate(module) - module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='enabled', choices=['disabled', 'enabled']), - name=dict(type='list', elements='str'), - activate=dict(type='bool', default=False), - ), - required_one_of=[['name', 'activate']], - supports_check_mode=True, - ) - - global AWALL_PATH - AWALL_PATH = module.get_bin_path('awall', required=True) - - p = module.params - - if p['name']: - if p['state'] == 'enabled': - enable_policy(module, p['name'], p['activate']) - elif p['state'] == 'disabled': - disable_policy(module, p['name'], p['activate']) - - if p['activate']: - if not module.check_mode: - activate(module) - module.exit_json(changed=True, msg="activated awall rules") - - module.fail_json(msg="no action defined") - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/beadm.py b/ansible_collections/community/general/plugins/modules/system/beadm.py deleted file mode 100644 index d89ca79a..00000000 --- a/ansible_collections/community/general/plugins/modules/system/beadm.py +++ /dev/null @@ -1,408 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - 
-from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: beadm -short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems. -description: - - Create, delete or activate ZFS boot environments. - - Mount and unmount ZFS boot environments. -author: Adam Števko (@xen0l) -options: - name: - description: - - ZFS boot environment name. - type: str - required: True - aliases: [ "be" ] - snapshot: - description: - - If specified, the new boot environment will be cloned from the given - snapshot or inactive boot environment. - type: str - description: - description: - - Associate a description with a new boot environment. This option is - available only on Solarish platforms. - type: str - options: - description: - - Create the datasets for new BE with specific ZFS properties. - - Multiple options can be specified. - - This option is available only on Solarish platforms. - type: str - mountpoint: - description: - - Path where to mount the ZFS boot environment. - type: path - state: - description: - - Create or delete ZFS boot environment. - type: str - choices: [ absent, activated, mounted, present, unmounted ] - default: present - force: - description: - - Specifies if the unmount should be forced. - type: bool - default: false -''' - -EXAMPLES = r''' -- name: Create ZFS boot environment - community.general.beadm: - name: upgrade-be - state: present - -- name: Create ZFS boot environment from existing inactive boot environment - community.general.beadm: - name: upgrade-be - snapshot: be@old - state: present - -- name: Create ZFS boot environment with compression enabled and description "upgrade" - community.general.beadm: - name: upgrade-be - options: "compression=on" - description: upgrade - state: present - -- name: Delete ZFS boot environment - community.general.beadm: - name: old-be - state: absent - -- name: Mount ZFS boot environment on /tmp/be - community.general.beadm: - name: BE - mountpoint: /tmp/be - state: mounted - -- name: Unmount ZFS boot environment - community.general.beadm: - name: BE - state: unmounted - -- name: Activate ZFS boot environment - community.general.beadm: - name: upgrade-be - state: activated -''' - -RETURN = r''' -name: - description: BE name - returned: always - type: str - sample: pre-upgrade -snapshot: - description: ZFS snapshot to create BE from - returned: always - type: str - sample: rpool/ROOT/oi-hipster@fresh -description: - description: BE description - returned: always - type: str - sample: Upgrade from 9.0 to 10.0 -options: - description: BE additional options - returned: always - type: str - sample: compression=on -mountpoint: - description: BE mountpoint - returned: always - type: str - sample: /mnt/be -state: - description: state of the target - returned: always - type: str - sample: present -force: - description: If forced action is wanted - returned: always - type: bool - sample: False -''' - -import os -import re -from ansible.module_utils.basic import AnsibleModule - - -class BE(object): - def __init__(self, module): - self.module = module - - self.name = module.params['name'] - self.snapshot = module.params['snapshot'] - self.description = module.params['description'] - self.options = module.params['options'] - self.mountpoint = module.params['mountpoint'] - self.state = module.params['state'] - self.force = module.params['force'] - self.is_freebsd = os.uname()[0] == 'FreeBSD' - - def _beadm_list(self): - cmd = [self.module.get_bin_path('beadm'), 
'list', '-H'] - if '@' in self.name: - cmd.append('-s') - return self.module.run_command(cmd) - - def _find_be_by_name(self, out): - if '@' in self.name: - for line in out.splitlines(): - if self.is_freebsd: - check = line.split() - if(check == []): - continue - full_name = check[0].split('/') - if(full_name == []): - continue - check[0] = full_name[len(full_name) - 1] - if check[0] == self.name: - return check - else: - check = line.split(';') - if check[0] == self.name: - return check - else: - for line in out.splitlines(): - if self.is_freebsd: - check = line.split() - if check[0] == self.name: - return check - else: - check = line.split(';') - if check[0] == self.name: - return check - return None - - def exists(self): - (rc, out, dummy) = self._beadm_list() - - if rc == 0: - if self._find_be_by_name(out): - return True - else: - return False - else: - return False - - def is_activated(self): - (rc, out, dummy) = self._beadm_list() - - if rc == 0: - line = self._find_be_by_name(out) - if line is None: - return False - if self.is_freebsd: - if 'R' in line[1]: - return True - else: - if 'R' in line[2]: - return True - - return False - - def activate_be(self): - cmd = [self.module.get_bin_path('beadm'), 'activate', self.name] - return self.module.run_command(cmd) - - def create_be(self): - cmd = [self.module.get_bin_path('beadm'), 'create'] - - if self.snapshot: - cmd.extend(['-e', self.snapshot]) - if not self.is_freebsd: - if self.description: - cmd.extend(['-d', self.description]) - if self.options: - cmd.extend(['-o', self.options]) - - cmd.append(self.name) - - return self.module.run_command(cmd) - - def destroy_be(self): - cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name] - return self.module.run_command(cmd) - - def is_mounted(self): - (rc, out, dummy) = self._beadm_list() - - if rc == 0: - line = self._find_be_by_name(out) - if line is None: - return False - if self.is_freebsd: - # On FreeBSD, we exclude currently mounted BE on /, as it is - # special and can be activated even if it is mounted. That is not - # possible with non-root BEs. 
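-                # line[2] is the "Mountpoint" column of the `beadm list -H`
-                # output here: "-" means the BE is not mounted and "/" is
-                # the live root BE.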
- if line[2] != '-' and line[2] != '/': - return True - else: - if line[3]: - return True - - return False - - def mount_be(self): - cmd = [self.module.get_bin_path('beadm'), 'mount', self.name] - - if self.mountpoint: - cmd.append(self.mountpoint) - - return self.module.run_command(cmd) - - def unmount_be(self): - cmd = [self.module.get_bin_path('beadm'), 'unmount'] - if self.force: - cmd.append('-f') - cmd.append(self.name) - - return self.module.run_command(cmd) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True, aliases=['be']), - snapshot=dict(type='str'), - description=dict(type='str'), - options=dict(type='str'), - mountpoint=dict(type='path'), - state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']), - force=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - - be = BE(module) - - rc = None - out = '' - err = '' - result = {} - result['name'] = be.name - result['state'] = be.state - - if be.snapshot: - result['snapshot'] = be.snapshot - - if be.description: - result['description'] = be.description - - if be.options: - result['options'] = be.options - - if be.mountpoint: - result['mountpoint'] = be.mountpoint - - if be.state == 'absent': - # beadm on FreeBSD and Solarish systems differs in delete behaviour in - # that we are not allowed to delete activated BE on FreeBSD while on - # Solarish systems we cannot delete BE if it is mounted. We add mount - # check for both platforms as BE should be explicitly unmounted before - # being deleted. On FreeBSD, we also check if the BE is activated. - if be.exists(): - if not be.is_mounted(): - if module.check_mode: - module.exit_json(changed=True) - - if be.is_freebsd: - if be.is_activated(): - module.fail_json(msg='Unable to remove active BE!') - - (rc, out, err) = be.destroy_be() - - if rc != 0: - module.fail_json(msg='Error while destroying BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - else: - module.fail_json(msg='Unable to remove BE as it is mounted!') - - elif be.state == 'present': - if not be.exists(): - if module.check_mode: - module.exit_json(changed=True) - - (rc, out, err) = be.create_be() - - if rc != 0: - module.fail_json(msg='Error while creating BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - - elif be.state == 'activated': - if not be.is_activated(): - if module.check_mode: - module.exit_json(changed=True) - - # On FreeBSD, beadm is unable to activate mounted BEs, so we add - # an explicit check for that case. 
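-            # In practice that means a mounted BE must first be set to
-            # state=unmounted before it can be activated on FreeBSD.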
- if be.is_freebsd: - if be.is_mounted(): - module.fail_json(msg='Unable to activate mounted BE!') - - (rc, out, err) = be.activate_be() - - if rc != 0: - module.fail_json(msg='Error while activating BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - elif be.state == 'mounted': - if not be.is_mounted(): - if module.check_mode: - module.exit_json(changed=True) - - (rc, out, err) = be.mount_be() - - if rc != 0: - module.fail_json(msg='Error while mounting BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - - elif be.state == 'unmounted': - if be.is_mounted(): - if module.check_mode: - module.exit_json(changed=True) - - (rc, out, err) = be.unmount_be() - - if rc != 0: - module.fail_json(msg='Error while unmounting BE: "%s"' % err, - name=be.name, - stderr=err, - rc=rc) - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/capabilities.py b/ansible_collections/community/general/plugins/modules/system/capabilities.py deleted file mode 100644 index ac6dde67..00000000 --- a/ansible_collections/community/general/plugins/modules/system/capabilities.py +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Nate Coraor -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: capabilities -short_description: Manage Linux capabilities -description: - - This module manipulates files privileges using the Linux capabilities(7) system. -options: - path: - description: - - Specifies the path to the file to be managed. - type: str - required: yes - aliases: [ key ] - capability: - description: - - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) - type: str - required: yes - aliases: [ cap ] - state: - description: - - Whether the entry should be present or absent in the file's capabilities. - type: str - choices: [ absent, present ] - default: present -notes: - - The capabilities system will automatically transform operators and flags into the effective set, - so for example, C(cap_foo=ep) will probably become C(cap_foo+ep). - - This module does not attempt to determine the final operator and flags to compare, - so you will want to ensure that your capabilities argument matches the final capabilities. 
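-    - For example, requesting C(cap_foo=ep) and reading back C(cap_foo+ep) from getcap describes the same state, so match the form getcap reports to stay idempotent.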
-author: -- Nate Coraor (@natefoo) -''' - -EXAMPLES = r''' -- name: Set cap_sys_chroot+ep on /foo - community.general.capabilities: - path: /foo - capability: cap_sys_chroot+ep - state: present - -- name: Remove cap_net_bind_service from /bar - community.general.capabilities: - path: /bar - capability: cap_net_bind_service - state: absent -''' - -from ansible.module_utils.basic import AnsibleModule - -OPS = ('=', '-', '+') - - -class CapabilitiesModule(object): - platform = 'Linux' - distribution = None - - def __init__(self, module): - self.module = module - self.path = module.params['path'].strip() - self.capability = module.params['capability'].strip().lower() - self.state = module.params['state'] - self.getcap_cmd = module.get_bin_path('getcap', required=True) - self.setcap_cmd = module.get_bin_path('setcap', required=True) - self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present') - - self.run() - - def run(self): - - current = self.getcap(self.path) - caps = [cap[0] for cap in current] - - if self.state == 'present' and self.capability_tup not in current: - # need to add capability - if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') - else: - # remove from current cap list if it's already set (but op/flags differ) - current = list(filter(lambda x: x[0] != self.capability_tup[0], current)) - # add new cap with correct op/flags - current.append(self.capability_tup) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) - elif self.state == 'absent' and self.capability_tup[0] in caps: - # need to remove capability - if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') - else: - # remove from current cap list and then set current list - current = filter(lambda x: x[0] != self.capability_tup[0], current) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) - self.module.exit_json(changed=False, state=self.state) - - def getcap(self, path): - rval = [] - cmd = "%s -v %s" % (self.getcap_cmd, path) - rc, stdout, stderr = self.module.run_command(cmd) - # If file xattrs are set but no caps are set the output will be: - # '/foo =' - # If file xattrs are unset the output will be: - # '/foo' - # If the file does not exist, the stderr will be (with rc == 0...): - # '/foo (No such file or directory)' - if rc != 0 or stderr != "": - self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) - if stdout.strip() != path: - if ' =' in stdout: - # process output of an older version of libcap - caps = stdout.split(' =')[1].strip().split() - else: - # otherwise, we have a newer version here - # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git - caps = stdout.split()[1].strip().split() - for cap in caps: - cap = cap.lower() - # getcap condenses capabilities with the same op/flags into a - # comma-separated list, so we have to parse that - if ',' in cap: - cap_group = cap.split(',') - cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) - for subcap in cap_group: - rval.append((subcap, op, flags)) - else: - rval.append(self._parse_cap(cap)) - return rval - - def setcap(self, path, caps): - caps = ' '.join([''.join(cap) for cap in caps]) - cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) - rc, stdout, stderr = self.module.run_command(cmd) - if rc != 0: - 
self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr) - else: - return stdout - - def _parse_cap(self, cap, op_required=True): - opind = -1 - try: - i = 0 - while opind == -1: - opind = cap.find(OPS[i]) - i += 1 - except Exception: - if op_required: - self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS)) - else: - return (cap, None, None) - op = cap[opind] - cap, flags = cap.split(op) - return (cap, op, flags) - - -# ============================================================== -# main - -def main(): - # defining module - module = AnsibleModule( - argument_spec=dict( - path=dict(type='str', required=True, aliases=['key']), - capability=dict(type='str', required=True, aliases=['cap']), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - supports_check_mode=True, - ) - - CapabilitiesModule(module) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/cronvar.py b/ansible_collections/community/general/plugins/modules/system/cronvar.py deleted file mode 100644 index 9871668a..00000000 --- a/ansible_collections/community/general/plugins/modules/system/cronvar.py +++ /dev/null @@ -1,423 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Cronvar Plugin: The goal of this plugin is to provide an idempotent -# method for set cron variable values. It should play well with the -# existing cron module as well as allow for manually added variables. -# Each variable entered will be preceded with a comment describing the -# variable so that it can be found later. This is required to be -# present in order for this plugin to find/modify the variable - -# This module is based on the crontab module. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: cronvar -short_description: Manage variables in crontabs -description: - - Use this module to manage crontab variables. - - This module allows you to create, update, or delete cron variable definitions. -options: - name: - description: - - Name of the crontab variable. - type: str - required: yes - value: - description: - - The value to set this variable to. - - Required if C(state=present). - type: str - insertafter: - description: - - If specified, the variable will be inserted after the variable specified. - - Used with C(state=present). - type: str - insertbefore: - description: - - Used with C(state=present). If specified, the variable will be inserted - just before the variable specified. - type: str - state: - description: - - Whether to ensure that the variable is present or absent. - type: str - choices: [ absent, present ] - default: present - user: - description: - - The specific user whose crontab should be modified. - - This parameter defaults to C(root) when unset. - type: str - cron_file: - description: - - If specified, uses this file instead of an individual user's crontab. - - Without a leading C(/), this is assumed to be in I(/etc/cron.d). - - With a leading C(/), this is taken as absolute. - type: str - backup: - description: - - If set, create a backup of the crontab before it is modified. - The location of the backup is returned in the C(backup) variable by this module. 
- type: bool - default: no -requirements: - - cron -author: -- Doug Luce (@dougluce) -''' - -EXAMPLES = r''' -- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists - community.general.cronvar: - name: EMAIL - value: doug@ansibmod.con.com - -- name: Ensure a variable does not exist. This may remove any variable named "LEGACY" - community.general.cronvar: - name: LEGACY - state: absent - -- name: Add a variable to a file under /etc/cron.d - community.general.cronvar: - name: LOGFILE - value: /var/log/yum-autoupdate.log - user: root - cron_file: ansible_yum-autoupdate -''' - -import os -import platform -import pwd -import re -import shlex -import sys -import tempfile - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote - - -class CronVarError(Exception): - pass - - -class CronVar(object): - """ - CronVar object to write variables to crontabs. - - user - the user of the crontab (defaults to root) - cron_file - a cron file under /etc/cron.d - """ - - def __init__(self, module, user=None, cron_file=None): - self.module = module - self.user = user - self.lines = None - self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',)) - self.cron_cmd = self.module.get_bin_path('crontab', required=True) - - if cron_file: - self.cron_file = "" - if os.path.isabs(cron_file): - self.cron_file = cron_file - else: - self.cron_file = os.path.join('/etc/cron.d', cron_file) - else: - self.cron_file = None - - self.read() - - def read(self): - # Read in the crontab from the system - self.lines = [] - if self.cron_file: - # read the cronfile - try: - f = open(self.cron_file, 'r') - self.lines = f.read().splitlines() - f.close() - except IOError: - # cron file does not exist - return - except Exception: - raise CronVarError("Unexpected error:", sys.exc_info()[0]) - else: - # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME - (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) - - if rc != 0 and rc != 1: # 1 can mean that there are no jobs. - raise CronVarError("Unable to read crontab") - - lines = out.splitlines() - count = 0 - for l in lines: - if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l - ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)): - self.lines.append(l) - count += 1 - - def log_message(self, message): - self.module.debug('ansible: "%s"' % message) - - def write(self, backup_file=None): - """ - Write the crontab to the system. Saves all information. - """ - if backup_file: - fileh = open(backup_file, 'w') - elif self.cron_file: - fileh = open(self.cron_file, 'w') - else: - filed, path = tempfile.mkstemp(prefix='crontab') - fileh = os.fdopen(filed, 'w') - - fileh.write(self.render()) - fileh.close() - - # return if making a backup - if backup_file: - return - - # Add the entire crontab back to the user crontab - if not self.cron_file: - # quoting shell args for now but really this should be two non-shell calls. 
FIXME - (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) - os.unlink(path) - - if rc != 0: - self.module.fail_json(msg=err) - - def remove_variable_file(self): - try: - os.unlink(self.cron_file) - return True - except OSError: - # cron file does not exist - return False - except Exception: - raise CronVarError("Unexpected error:", sys.exc_info()[0]) - - def parse_for_var(self, line): - lexer = shlex.shlex(line) - lexer.wordchars = self.wordchars - varname = lexer.get_token() - is_env_var = lexer.get_token() == '=' - value = ''.join(lexer) - if is_env_var: - return (varname, value) - raise CronVarError("Not a variable.") - - def find_variable(self, name): - for l in self.lines: - try: - (varname, value) = self.parse_for_var(l) - if varname == name: - return value - except CronVarError: - pass - return None - - def get_var_names(self): - var_names = [] - for l in self.lines: - try: - var_name, dummy = self.parse_for_var(l) - var_names.append(var_name) - except CronVarError: - pass - return var_names - - def add_variable(self, name, value, insertbefore, insertafter): - if insertbefore is None and insertafter is None: - # Add the variable to the top of the file. - self.lines.insert(0, "%s=%s" % (name, value)) - else: - newlines = [] - for l in self.lines: - try: - varname, dummy = self.parse_for_var(l) # Throws if not a var line - if varname == insertbefore: - newlines.append("%s=%s" % (name, value)) - newlines.append(l) - elif varname == insertafter: - newlines.append(l) - newlines.append("%s=%s" % (name, value)) - else: - raise CronVarError # Append. - except CronVarError: - newlines.append(l) - - self.lines = newlines - - def remove_variable(self, name): - self.update_variable(name, None, remove=True) - - def update_variable(self, name, value, remove=False): - newlines = [] - for l in self.lines: - try: - varname, dummy = self.parse_for_var(l) # Throws if not a var line - if varname != name: - raise CronVarError # Append. 
- if not remove: - newlines.append("%s=%s" % (name, value)) - except CronVarError: - newlines.append(l) - - self.lines = newlines - - def render(self): - """ - Render a proper crontab - """ - result = '\n'.join(self.lines) - if result and result[-1] not in ['\n', '\r']: - result += '\n' - return result - - def _read_user_execute(self): - """ - Returns the command line for reading a crontab - """ - user = '' - - if self.user: - if platform.system() == 'SunOS': - return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd)) - elif platform.system() == 'AIX': - return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user)) - elif platform.system() == 'HP-UX': - return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user)) - elif pwd.getpwuid(os.getuid())[0] != self.user: - user = '-u %s' % shlex_quote(self.user) - return "%s %s %s" % (self.cron_cmd, user, '-l') - - def _write_execute(self, path): - """ - Return the command line for writing a crontab - """ - user = '' - if self.user: - if platform.system() in ['SunOS', 'HP-UX', 'AIX']: - return "chown %s %s ; su '%s' -c '%s %s'" % ( - shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path)) - elif pwd.getpwuid(os.getuid())[0] != self.user: - user = '-u %s' % shlex_quote(self.user) - return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path)) - - -# ================================================== - -def main(): - # The following example playbooks: - # - # - community.general.cronvar: name="SHELL" value="/bin/bash" - # - # - name: Set the email - # community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com" - # - # - name: Get rid of the old new host variable - # community.general.cronvar: name="NEW_HOST" state=absent - # - # Would produce: - # SHELL = /bin/bash - # EMAILTO = doug@ansibmod.con.com - - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - value=dict(type='str'), - user=dict(type='str'), - cron_file=dict(type='str'), - insertafter=dict(type='str'), - insertbefore=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), - backup=dict(type='bool', default=False), - ), - mutually_exclusive=[['insertbefore', 'insertafter']], - supports_check_mode=False, - ) - - name = module.params['name'] - value = module.params['value'] - user = module.params['user'] - cron_file = module.params['cron_file'] - insertafter = module.params['insertafter'] - insertbefore = module.params['insertbefore'] - state = module.params['state'] - backup = module.params['backup'] - ensure_present = state == 'present' - - changed = False - res_args = dict() - - # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. 
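-    # (For reference: int('022', 8) == 0o022 == 18, so the umask set below caps
-    # newly created files at mode 0644 and new directories at mode 0755.)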
-    os.umask(int('022', 8))
-    cronvar = CronVar(module, user, cron_file)
-
-    module.debug('cronvar instantiated - name: "%s"' % name)
-
-    # --- user input validation ---
-
-    if name is None and ensure_present:
-        module.fail_json(msg="You must specify 'name' to insert a new cron variable")
-
-    if value is None and ensure_present:
-        module.fail_json(msg="You must specify 'value' to insert a new cron variable")
-
-    if name is None and not ensure_present:
-        module.fail_json(msg="You must specify 'name' to remove a cron variable")
-
-    # if requested make a backup before making a change
-    if backup:
-        dummy, backup_file = tempfile.mkstemp(prefix='cronvar')
-        cronvar.write(backup_file)
-
-    if cronvar.cron_file and not name and not ensure_present:
-        changed = cronvar.remove_variable_file()
-        module.exit_json(changed=changed, cron_file=cron_file, state=state)
-
-    old_value = cronvar.find_variable(name)
-
-    if ensure_present:
-        if old_value is None:
-            cronvar.add_variable(name, value, insertbefore, insertafter)
-            changed = True
-        elif old_value != value:
-            cronvar.update_variable(name, value)
-            changed = True
-    else:
-        if old_value is not None:
-            cronvar.remove_variable(name)
-            changed = True
-
-    res_args = {
-        "vars": cronvar.get_var_names(),
-        "changed": changed
-    }
-
-    if changed:
-        cronvar.write()
-
-    # retain the backup only if the crontab or cron file has changed
-    if backup:
-        if changed:
-            res_args['backup_file'] = backup_file
-        else:
-            os.unlink(backup_file)
-
-    if cron_file:
-        res_args['cron_file'] = cron_file
-
-    module.exit_json(**res_args)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/system/crypttab.py b/ansible_collections/community/general/plugins/modules/system/crypttab.py
deleted file mode 100644
index 8eeec56d..00000000
--- a/ansible_collections/community/general/plugins/modules/system/crypttab.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Steve
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: crypttab
-short_description: Encrypted Linux block devices
-description:
-  - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
-options:
-  name:
-    description:
-      - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
-        optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
-        will be stripped from I(name).
-    type: str
-    required: yes
-  state:
-    description:
-      - Use I(present) to add a line to C(/etc/crypttab) or update its definition
-        if already present.
-      - Use I(absent) to remove a line with matching I(name).
-      - Use I(opts_present) to add options to those already present; options with
-        different values will be updated.
-      - Use I(opts_absent) to remove options from the existing set.
-    type: str
-    required: yes
-    choices: [ absent, opts_absent, opts_present, present ]
-  backing_device:
-    description:
-      - Path to the underlying block device or file, or the UUID of a block-device
-        prefixed with I(UUID=).
-    type: str
-  password:
-    description:
-      - Encryption password, the path to a file containing the password, or
-        C(-) or unset if the password should be entered at boot.
-    type: path
-  opts:
-    description:
-      - A comma-delimited list of options. See C(crypttab(5)) for details.
-    type: str
-  path:
-    description:
-      - Path to file to use instead of C(/etc/crypttab).
-      - This might be useful in a chroot environment.
-    type: path
-    default: /etc/crypttab
-author:
-- Steve (@groks)
-'''
-
-EXAMPLES = r'''
-- name: Set the options explicitly for a device which must already exist
-  community.general.crypttab:
-    name: luks-home
-    state: present
-    opts: discard,cipher=aes-cbc-essiv:sha256
-
-- name: Add the 'discard' option to any existing options for all devices
-  community.general.crypttab:
-    name: '{{ item.device }}'
-    state: opts_present
-    opts: discard
-  loop: '{{ ansible_mounts }}'
-  when: "'/dev/mapper/luks-' in item.device"
-'''
-
-import os
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_bytes, to_native
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(type='str', required=True),
-            state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
-            backing_device=dict(type='str'),
-            password=dict(type='path'),
-            opts=dict(type='str'),
-            path=dict(type='path', default='/etc/crypttab')
-        ),
-        supports_check_mode=True,
-    )
-
-    backing_device = module.params['backing_device']
-    password = module.params['password']
-    opts = module.params['opts']
-    state = module.params['state']
-    path = module.params['path']
-    name = module.params['name']
-    if name.startswith('/dev/mapper/'):
-        name = name[len('/dev/mapper/'):]
-
-    if state != 'absent' and backing_device is None and password is None and opts is None:
-        module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
-                         **module.params)
-
-    if 'opts' in state and (backing_device is not None or password is not None):
-        module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
-                         **module.params)
-
-    for arg_name, arg in (('name', name),
-                          ('backing_device', backing_device),
-                          ('password', password),
-                          ('opts', opts)):
-        if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
-            module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
-                             **module.params)
-
-    try:
-        crypttab = Crypttab(path)
-        existing_line = crypttab.match(name)
-    except Exception as e:
-        module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
-                         exception=traceback.format_exc(), **module.params)
-
-    if 'present' in state and existing_line is None and backing_device is None:
-        module.fail_json(msg="'backing_device' required to add a new entry",
-                         **module.params)
-
-    changed, reason = False, '?'
- - if state == 'absent': - if existing_line is not None: - changed, reason = existing_line.remove() - - elif state == 'present': - if existing_line is not None: - changed, reason = existing_line.set(backing_device, password, opts) - else: - changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) - - elif state == 'opts_present': - if existing_line is not None: - changed, reason = existing_line.opts.add(opts) - else: - changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) - - elif state == 'opts_absent': - if existing_line is not None: - changed, reason = existing_line.opts.remove(opts) - - if changed and not module.check_mode: - try: - f = open(path, 'wb') - f.write(to_bytes(crypttab, errors='surrogate_or_strict')) - finally: - f.close() - - module.exit_json(changed=changed, msg=reason, **module.params) - - -class Crypttab(object): - _lines = [] - - def __init__(self, path): - self.path = path - if not os.path.exists(path): - if not os.path.exists(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) - open(path, 'a').close() - - try: - f = open(path, 'r') - for line in f.readlines(): - self._lines.append(Line(line)) - finally: - f.close() - - def add(self, line): - self._lines.append(line) - return True, 'added line' - - def lines(self): - for line in self._lines: - if line.valid(): - yield line - - def match(self, name): - for line in self.lines(): - if line.name == name: - return line - return None - - def __str__(self): - lines = [] - for line in self._lines: - lines.append(str(line)) - crypttab = '\n'.join(lines) - if len(crypttab) == 0: - crypttab += '\n' - if crypttab[-1] != '\n': - crypttab += '\n' - return crypttab - - -class Line(object): - def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None): - self.line = line - self.name = name - self.backing_device = backing_device - self.password = password - self.opts = Options(opts) - - if line is not None: - self.line = self.line.rstrip('\n') - if self._line_valid(line): - self.name, backing_device, password, opts = self._split_line(line) - - self.set(backing_device, password, opts) - - def set(self, backing_device, password, opts): - changed = False - - if backing_device is not None and self.backing_device != backing_device: - self.backing_device = backing_device - changed = True - - if password is not None and self.password != password: - self.password = password - changed = True - - if opts is not None: - opts = Options(opts) - if opts != self.opts: - self.opts = opts - changed = True - - return changed, 'updated line' - - def _line_valid(self, line): - if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4): - return False - return True - - def _split_line(self, line): - fields = line.split() - try: - field2 = fields[2] - except IndexError: - field2 = None - try: - field3 = fields[3] - except IndexError: - field3 = None - - return (fields[0], - fields[1], - field2, - field3) - - def remove(self): - self.line, self.name, self.backing_device = '', None, None - return True, 'removed line' - - def valid(self): - if self.name is not None and self.backing_device is not None: - return True - return False - - def __str__(self): - if self.valid(): - fields = [self.name, self.backing_device] - if self.password is not None or self.opts: - if self.password is not None: - fields.append(self.password) - else: - fields.append('none') - if self.opts: - fields.append(str(self.opts)) - return ' '.join(fields) - return self.line - - 
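-# A rough sketch of the behaviour implemented below, with illustrative values:
-# Options('discard,cipher=aes') parses to {'discard': None, 'cipher': 'aes'},
-# and str() renders it back as 'discard,cipher=aes', insertion order being
-# preserved through self.itemlist.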
-class Options(dict): - """opts_string looks like: 'discard,foo=bar,baz=greeble' """ - - def __init__(self, opts_string): - super(Options, self).__init__() - self.itemlist = [] - if opts_string is not None: - for opt in opts_string.split(','): - kv = opt.split('=') - if len(kv) > 1: - k, v = (kv[0], kv[1]) - else: - k, v = (kv[0], None) - self[k] = v - - def add(self, opts_string): - changed = False - for k, v in Options(opts_string).items(): - if k in self: - if self[k] != v: - changed = True - else: - changed = True - self[k] = v - return changed, 'updated options' - - def remove(self, opts_string): - changed = False - for k in Options(opts_string): - if k in self: - del self[k] - changed = True - return changed, 'removed options' - - def keys(self): - return self.itemlist - - def values(self): - return [self[key] for key in self] - - def items(self): - return [(key, self[key]) for key in self] - - def __iter__(self): - return iter(self.itemlist) - - def __setitem__(self, key, value): - if key not in self: - self.itemlist.append(key) - super(Options, self).__setitem__(key, value) - - def __delitem__(self, key): - self.itemlist.remove(key) - super(Options, self).__delitem__(key) - - def __ne__(self, obj): - return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items())) - - def __str__(self): - ret = [] - for k, v in self.items(): - if v is None: - ret.append(k) - else: - ret.append('%s=%s' % (k, v)) - return ','.join(ret) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/dconf.py b/ansible_collections/community/general/plugins/modules/system/dconf.py deleted file mode 100644 index 636ca536..00000000 --- a/ansible_collections/community/general/plugins/modules/system/dconf.py +++ /dev/null @@ -1,387 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Branko Majic -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' -module: dconf -author: - - "Branko Majic (@azaghal)" -short_description: Modify and read dconf database -description: - - This module allows modifications and reading of C(dconf) database. The module - is implemented as a wrapper around C(dconf) tool. Please see the dconf(1) man - page for more details. - - Since C(dconf) requires a running D-Bus session to change values, the module - will try to detect an existing session and reuse it, or run the tool via - C(dbus-run-session). -notes: - - This module depends on C(psutil) Python library (version 4.0.0 and upwards), - C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on - distribution you are using, you may need to install additional packages to - have these available. - - Detection of existing, running D-Bus session, required to change settings - via C(dconf), is not 100% reliable due to implementation details of D-Bus - daemon itself. This might lead to running applications not picking-up - changes on the fly if options are changed via Ansible and - C(dbus-run-session). - - Keep in mind that the C(dconf) CLI tool, which this module wraps around, - utilises an unusual syntax for the values (GVariant). For example, if you - wanted to provide a string value, the correct syntax would be - C(value="'myvalue'") - with single quotes as part of the Ansible parameter - value. 
- - When using loops in combination with a value like - :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible - type conversions. Applying a filter :code:`"{{ item.value | string }}"` - to the parameter variable can avoid potential conversion problems. - - The easiest way to figure out exact syntax/value you need to provide for a - key is by making the configuration change in application affected by the - key, and then having a look at value set via commands C(dconf dump - /path/to/dir/) or C(dconf read /path/to/key). -options: - key: - type: str - required: true - description: - - A dconf key to modify or read from the dconf database. - value: - type: str - required: false - description: - - Value to set for the specified dconf key. Value should be specified in - GVariant format. Due to complexity of this format, it is best to have a - look at existing values in the dconf database. - - Required for I(state=present). - state: - type: str - required: false - default: present - choices: [ 'read', 'present', 'absent' ] - description: - - The action to take upon the key/value. -''' - -RETURN = r""" -value: - description: value associated with the requested key - returned: success, state was "read" - type: str - sample: "'Default'" -""" - -EXAMPLES = r""" -- name: Configure available keyboard layouts in Gnome - community.general.dconf: - key: "/org/gnome/desktop/input-sources/sources" - value: "[('xkb', 'us'), ('xkb', 'se')]" - state: present - -- name: Read currently available keyboard layouts in Gnome - community.general.dconf: - key: "/org/gnome/desktop/input-sources/sources" - state: read - register: keyboard_layouts - -- name: Reset the available keyboard layouts in Gnome - community.general.dconf: - key: "/org/gnome/desktop/input-sources/sources" - state: absent - -- name: Configure available keyboard layouts in Cinnamon - community.general.dconf: - key: "/org/gnome/libgnomekbd/keyboard/layouts" - value: "['us', 'se']" - state: present - -- name: Read currently available keyboard layouts in Cinnamon - community.general.dconf: - key: "/org/gnome/libgnomekbd/keyboard/layouts" - state: read - register: keyboard_layouts - -- name: Reset the available keyboard layouts in Cinnamon - community.general.dconf: - key: "/org/gnome/libgnomekbd/keyboard/layouts" - state: absent - -- name: Disable desktop effects in Cinnamon - community.general.dconf: - key: "/org/cinnamon/desktop-effects" - value: "false" - state: present -""" - - -import os -import traceback - -PSUTIL_IMP_ERR = None -try: - import psutil - HAS_PSUTIL = True -except ImportError: - PSUTIL_IMP_ERR = traceback.format_exc() - HAS_PSUTIL = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class DBusWrapper(object): - """ - Helper class that can be used for running a command with a working D-Bus - session. - - If possible, command will be run against an existing D-Bus session, - otherwise the session will be spawned via dbus-run-session. - - Example usage: - - dbus_wrapper = DBusWrapper(ansible_module) - dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"]) - """ - - def __init__(self, module): - """ - Initialises an instance of the class. - - :param module: Ansible module instance used to signal failures and run commands. - :type module: AnsibleModule - """ - - # Store passed-in arguments and set-up some defaults. - self.module = module - - # Try to extract existing D-Bus session address. 
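-        # (Typically a string of the form 'unix:path=/run/user/1000/bus'; it is
-        # treated as an opaque value here.)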
- self.dbus_session_bus_address = self._get_existing_dbus_session() - - # If no existing D-Bus session was detected, check if dbus-run-session - # is available. - if self.dbus_session_bus_address is None: - self.dbus_run_session_cmd = self.module.get_bin_path('dbus-run-session', required=True) - - def _get_existing_dbus_session(self): - """ - Detects and returns an existing D-Bus session bus address. - - :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None. - """ - - # We'll be checking the processes of current user only. - uid = os.getuid() - - # Go through all the pids for this user, try to extract the D-Bus - # session bus address from environment, and ensure it is possible to - # connect to it. - self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid) - - for pid in psutil.pids(): - try: - process = psutil.Process(pid) - process_real_uid, dummy, dummy = process.uids() - if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ(): - dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS'] - self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate) - dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True) - command = [dbus_send_cmd, '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test'] - rc, dummy, dummy = self.module.run_command(command) - - if rc == 0: - self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate) - - return dbus_session_bus_address_candidate - - # This can happen with things like SSH sessions etc. - except psutil.AccessDenied: - pass - # Process has disappeared while inspecting it - except psutil.NoSuchProcess: - pass - - self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session") - - return None - - def run_command(self, command): - """ - Runs the specified command within a functional D-Bus session. Command is - effectively passed-on to AnsibleModule.run_command() method, with - modification for using dbus-run-session if necessary. - - :param command: Command to run, including parameters. Each element of the list should be a string. - :type module: list - - :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command. - """ - - if self.dbus_session_bus_address is None: - self.module.debug("Using dbus-run-session wrapper for running commands.") - command = [self.dbus_run_session_cmd] + command - rc, out, err = self.module.run_command(command) - - if self.dbus_session_bus_address is None and rc == 127: - self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err) - else: - extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address} - rc, out, err = self.module.run_command(command, environ_update=extra_environment) - - return rc, out, err - - -class DconfPreference(object): - - def __init__(self, module, check_mode=False): - """ - Initialises instance of the class. - - :param module: Ansible module instance used to signal failures and run commands. - :type module: AnsibleModule - - :param check_mode: Specify whether to only check if a change should be made or if to actually make a change. 
-        :type check_mode: bool
-        """
-
-        self.module = module
-        self.check_mode = check_mode
-        # Check if dconf binary exists
-        self.dconf_bin = self.module.get_bin_path('dconf', required=True)
-
-    def read(self, key):
-        """
-        Retrieves current value associated with the dconf key.
-
-        If an error occurs, a call will be made to AnsibleModule.fail_json.
-
-        :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
-        """
-        command = [self.dconf_bin, "read", key]
-
-        rc, out, err = self.module.run_command(command)
-
-        if rc != 0:
-            self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err,
-                                  out=out,
-                                  err=err)
-
-        if out == '':
-            value = None
-        else:
-            value = out.rstrip('\n')
-
-        return value
-
-    def write(self, key, value):
-        """
-        Writes the value for specified key.
-
-        If an error occurs, a call will be made to AnsibleModule.fail_json.
-
-        :param key: dconf key for which the value should be set. Should be a full path.
-        :type key: str
-
-        :param value: Value to set for the specified dconf key. Should be specified in GVariant format.
-        :type value: str
-
-        :returns: bool -- True if a change was made, False if no change was required.
-        """
-        # If no change is needed (or won't be done due to check_mode), notify
-        # caller straight away.
-        if value == self.read(key):
-            return False
-        elif self.check_mode:
-            return True
-
-        # Set up the command to run. Since D-Bus is needed for the write
-        # operation, wrap the dconf command with the D-Bus wrapper.
-        command = [self.dconf_bin, "write", key, value]
-
-        # Run the command and fetch standard return code, stdout, and stderr.
-        dbus_wrapper = DBusWrapper(self.module)
-        rc, out, err = dbus_wrapper.run_command(command)
-
-        if rc != 0:
-            self.module.fail_json(msg='dconf failed while writing the value with error: %s' % err,
-                                  out=out,
-                                  err=err)
-
-        # Value was changed.
-        return True
-
-    def reset(self, key):
-        """
-        Resets the value for the specified key (removes it from user configuration).
-
-        If an error occurs, a call will be made to AnsibleModule.fail_json.
-
-        :param key: dconf key to reset. Should be a full path.
-        :type key: str
-
-        :returns: bool -- True if a change was made, False if no change was required.
-        """
-
-        # Read the current value first.
-        current_value = self.read(key)
-
-        # No change is needed when the key is not set at all; in check mode,
-        # just report that a change would be made.
-        if current_value is None:
-            return False
-        elif self.check_mode:
-            return True
-
-        # Set up the command to run. Since D-Bus is needed for the reset
-        # operation, wrap the dconf command with the D-Bus wrapper.
-        command = [self.dconf_bin, "reset", key]
-
-        # Run the command and fetch standard return code, stdout, and stderr.
-        dbus_wrapper = DBusWrapper(self.module)
-        rc, out, err = dbus_wrapper.run_command(command)
-
-        if rc != 0:
-            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err,
-                                  out=out,
-                                  err=err)
-
-        # Value was changed.
-        return True
-
-
-def main():
-    # Set up the Ansible module
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(default='present', choices=['present', 'absent', 'read']),
-            key=dict(required=True, type='str', no_log=False),
-            value=dict(required=False, default=None, type='str'),
-        ),
-        supports_check_mode=True
-    )
-
-    if not HAS_PSUTIL:
-        module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)
-
-    # If present state was specified, value must be provided.
- if module.params['state'] == 'present' and module.params['value'] is None: - module.fail_json(msg='State "present" requires "value" to be set.') - - # Create wrapper instance. - dconf = DconfPreference(module, module.check_mode) - - # Process based on different states. - if module.params['state'] == 'read': - value = dconf.read(module.params['key']) - module.exit_json(changed=False, value=value) - elif module.params['state'] == 'present': - changed = dconf.write(module.params['key'], module.params['value']) - module.exit_json(changed=changed) - elif module.params['state'] == 'absent': - changed = dconf.reset(module.params['key']) - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py b/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py deleted file mode 100644 index 709d35b8..00000000 --- a/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py +++ /dev/null @@ -1,371 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017-2020, Yann Amar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: dpkg_divert -short_description: Override a debian package's version of a file -version_added: '0.2.0' -author: - - quidame (@quidame) -description: - - A diversion is for C(dpkg) the knowledge that only a given package - (or the local administrator) is allowed to install a file at a given - location. Other packages shipping their own version of this file will - be forced to I(divert) it, i.e. to install it at another location. It - allows one to keep changes in a file provided by a debian package by - preventing its overwrite at package upgrade. - - This module manages diversions of debian packages files using the - C(dpkg-divert) commandline tool. It can either create or remove a - diversion for a given file, but also update an existing diversion - to modify its I(holder) and/or its I(divert) location. -options: - path: - description: - - The original and absolute path of the file to be diverted or - undiverted. This path is unique, i.e. it is not possible to get - two diversions for the same I(path). - required: true - type: path - state: - description: - - When I(state=absent), remove the diversion of the specified - I(path); when I(state=present), create the diversion if it does - not exist, or update its package I(holder) or I(divert) location, - if it already exists. - type: str - default: present - choices: [absent, present] - holder: - description: - - The name of the package whose copy of file is not diverted, also - known as the diversion holder or the package the diversion belongs - to. - - The actual package does not have to be installed or even to exist - for its name to be valid. If not specified, the diversion is hold - by 'LOCAL', that is reserved by/for dpkg for local diversions. - - This parameter is ignored when I(state=absent). - type: str - divert: - description: - - The location where the versions of file will be diverted. - - Default is to add suffix C(.distrib) to the file path. - - This parameter is ignored when I(state=absent). - type: path - rename: - description: - - Actually move the file aside (when I(state=present)) or back (when - I(state=absent)), but only when changing the state of the diversion. 
- This parameter has no effect when attempting to add a diversion that - already exists or when removing an unexisting one. - - Unless I(force=true), renaming fails if the destination file already - exists (this lock being a dpkg-divert feature, and bypassing it being - a module feature). - type: bool - default: no - force: - description: - - When I(rename=true) and I(force=true), renaming is performed even if - the target of the renaming exists, i.e. the existing contents of the - file at this location will be lost. - - This parameter is ignored when I(rename=false). - type: bool - default: no -notes: - - This module supports I(check_mode) and I(diff). -requirements: - - dpkg-divert >= 1.15.0 (Debian family) -''' - -EXAMPLES = r''' -- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place - community.general.dpkg_divert: - path: /usr/bin/busybox - -- name: Divert /usr/bin/busybox by package 'branding' - community.general.dpkg_divert: - path: /usr/bin/busybox - holder: branding - -- name: Divert and rename busybox to busybox.dpkg-divert - community.general.dpkg_divert: - path: /usr/bin/busybox - divert: /usr/bin/busybox.dpkg-divert - rename: yes - -- name: Remove the busybox diversion and move the diverted file back - community.general.dpkg_divert: - path: /usr/bin/busybox - state: absent - rename: yes - force: yes -''' - -RETURN = r''' -commands: - description: The dpkg-divert commands ran internally by the module. - type: list - returned: on_success - elements: str - sample: |- - [ - "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc", - "/usr/bin/dpkg-divert --package ansible --no-rename --add /etc/foobarrc" - ] -messages: - description: The dpkg-divert relevant messages (stdout or stderr). - type: list - returned: on_success - elements: str - sample: |- - [ - "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'", - "Adding 'diversion of /etc/foobarrc to /etc/foobarrc.distrib by ansible'" - ] -diversion: - description: The status of the diversion after task execution. - type: dict - returned: always - contains: - divert: - description: The location of the diverted file. - type: str - holder: - description: The package holding the diversion. - type: str - path: - description: The path of the file to divert/undivert. - type: str - state: - description: The state of the diversion. 
-      type: str
-  sample: |-
-    {
-      "divert": "/etc/foobarrc.distrib",
-      "holder": "LOCAL",
-      "path": "/etc/foobarrc",
-      "state": "present"
-    }
-'''
-
-
-import re
-import os
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_bytes, to_native
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-
-def diversion_state(module, command, path):
-    diversion = dict(path=path, state='absent', divert=None, holder=None)
-    rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True)
-    if out:
-        diversion['state'] = 'present'
-        diversion['holder'] = out.rstrip()
-        rc, out, err = module.run_command([command, '--truename', path], check_rc=True)
-        diversion['divert'] = out.rstrip()
-    return diversion
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            path=dict(required=True, type='path'),
-            state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
-            holder=dict(required=False, type='str'),
-            divert=dict(required=False, type='path'),
-            rename=dict(required=False, type='bool', default=False),
-            force=dict(required=False, type='bool', default=False),
-        ),
-        supports_check_mode=True,
-    )
-
-    path = module.params['path']
-    state = module.params['state']
-    holder = module.params['holder']
-    divert = module.params['divert']
-    rename = module.params['rename']
-    force = module.params['force']
-
-    diversion_wanted = dict(path=path, state=state)
-    changed = False
-
-    DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
-    MAINCOMMAND = [DPKG_DIVERT]
-
-    # Option --listpackage is needed and comes with 1.15.0
-    rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
-    [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]
-    if LooseVersion(current_version) < LooseVersion("1.15.0"):
-        module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
-    no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))
-
-    b_path = to_bytes(path, errors='surrogate_or_strict')
-    path_exists = os.path.exists(b_path)
-    # Used for things not doable with a single dpkg-divert command (as forced
-    # renaming of files, and diversion's 'holder' or 'divert' updates).
-    target_exists = False
-    truename_exists = False
-
-    diversion_before = diversion_state(module, DPKG_DIVERT, path)
-    if diversion_before['state'] == 'present':
-        b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
-        truename_exists = os.path.exists(b_divert)
-
-    # Append options as requested in the task parameters, but ignore some of
-    # them when removing the diversion.
-    if rename:
-        MAINCOMMAND.append('--rename')
-    elif no_rename_is_supported:
-        MAINCOMMAND.append('--no-rename')
-
-    if state == 'present':
-        if holder and holder != 'LOCAL':
-            MAINCOMMAND.extend(['--package', holder])
-            diversion_wanted['holder'] = holder
-        else:
-            MAINCOMMAND.append('--local')
-            diversion_wanted['holder'] = 'LOCAL'
-
-        if divert:
-            MAINCOMMAND.extend(['--divert', divert])
-            target = divert
-        else:
-            target = '%s.distrib' % path
-
-        MAINCOMMAND.extend(['--add', path])
-        diversion_wanted['divert'] = target
-        b_target = to_bytes(target, errors='surrogate_or_strict')
-        target_exists = os.path.exists(b_target)
-
-    else:
-        MAINCOMMAND.extend(['--remove', path])
-        diversion_wanted['divert'] = None
-        diversion_wanted['holder'] = None
-
-    # Start to populate the returned objects.
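-    # (For instance, with rename left disabled and no holder given, maincommand
-    # may read '/usr/bin/dpkg-divert --no-rename --local --add /etc/foobarrc',
-    # matching the samples documented in RETURN above.)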
- diversion = diversion_before.copy() - maincommand = ' '.join(MAINCOMMAND) - commands = [maincommand] - - if module.check_mode or diversion_wanted == diversion_before: - MAINCOMMAND.insert(1, '--test') - diversion_after = diversion_wanted - - # Just try and see - rc, stdout, stderr = module.run_command(MAINCOMMAND) - - if rc == 0: - messages = [stdout.rstrip()] - - # else... cases of failure with dpkg-divert are: - # - The diversion does not belong to the same package (or LOCAL) - # - The divert filename is not the same (e.g. path.distrib != path.divert) - # - The renaming is forbidden by dpkg-divert (i.e. both the file and the - # diverted file exist) - - elif state != diversion_before['state']: - # There should be no case with 'divert' and 'holder' when creating the - # diversion from none, and they're ignored when removing the diversion. - # So this is all about renaming... - if rename and path_exists and ( - (state == 'absent' and truename_exists) or - (state == 'present' and target_exists)): - if not force: - msg = "Set 'force' param to True to force renaming of files." - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) - else: - msg = "Unexpected error while changing state of the diversion." - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) - - to_remove = path - if state == 'present': - to_remove = target - - if not module.check_mode: - try: - b_remove = to_bytes(to_remove, errors='surrogate_or_strict') - os.unlink(b_remove) - except OSError as e: - msg = 'Failed to remove %s: %s' % (to_remove, to_native(e)) - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) - rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) - - messages = [stdout.rstrip()] - - # The situation is that we want to modify the settings (holder or divert) - # of an existing diversion. dpkg-divert does not handle this, and we have - # to remove the existing diversion first, and then set a new one. - else: - RMDIVERSION = [DPKG_DIVERT, '--remove', path] - if no_rename_is_supported: - RMDIVERSION.insert(1, '--no-rename') - rmdiversion = ' '.join(RMDIVERSION) - - if module.check_mode: - RMDIVERSION.insert(1, '--test') - - if rename: - MAINCOMMAND.remove('--rename') - if no_rename_is_supported: - MAINCOMMAND.insert(1, '--no-rename') - maincommand = ' '.join(MAINCOMMAND) - - commands = [rmdiversion, maincommand] - rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True) - - if module.check_mode: - messages = [rmdout.rstrip(), 'Running in check mode'] - else: - rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) - messages = [rmdout.rstrip(), stdout.rstrip()] - - # Avoid if possible to orphan files (i.e. to dereference them in diversion - # database but let them in place), but do not make renaming issues fatal. - # BTW, this module is not about state of files involved in the diversion. 
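-    # (Illustrative case: if the divert location changed from
-    # '/etc/foobarrc.distrib' to a custom '/etc/foobarrc.divert', the file on
-    # disk is moved along with it, on a best-effort basis.)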
- old = diversion_before['divert'] - new = diversion_wanted['divert'] - if new != old: - b_old = to_bytes(old, errors='surrogate_or_strict') - b_new = to_bytes(new, errors='surrogate_or_strict') - if os.path.exists(b_old) and not os.path.exists(b_new): - try: - os.rename(b_old, b_new) - except OSError as e: - pass - - if not module.check_mode: - diversion_after = diversion_state(module, DPKG_DIVERT, path) - - diversion = diversion_after.copy() - diff = dict() - if module._diff: - diff['before'] = diversion_before - diff['after'] = diversion_after - - if diversion_after != diversion_before: - changed = True - - if diversion_after == diversion_wanted: - module.exit_json(changed=changed, diversion=diversion, - commands=commands, messages=messages, diff=diff) - else: - msg = "Unexpected error: see stdout and stderr for details." - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/facter.py b/ansible_collections/community/general/plugins/modules/system/facter.py deleted file mode 100644 index abd2ebc3..00000000 --- a/ansible_collections/community/general/plugins/modules/system/facter.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: facter -short_description: Runs the discovery program I(facter) on the remote system -description: - - Runs the C(facter) discovery program - (U(https://github.com/puppetlabs/facter)) on the remote system, returning - JSON data that can be useful for inventory purposes. -options: - arguments: - description: - - Specifies arguments for facter. 
- type: list - elements: str -requirements: - - facter - - ruby-json -author: - - Ansible Core Team - - Michael DeHaan -''' - -EXAMPLES = ''' -# Example command-line invocation -# ansible www.example.net -m facter - -- name: Execute facter no arguments - community.general.facter: - -- name: Execute facter with arguments - community.general.facter: - arguments: - - -p - - system_uptime - - timezone - - is_virtual -''' -import json - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - arguments=dict(required=False, type='list', elements='str') - ) - ) - - facter_path = module.get_bin_path( - 'facter', - opt_dirs=['/opt/puppetlabs/bin']) - - cmd = [facter_path, "--json"] - if module.params['arguments']: - cmd += module.params['arguments'] - - rc, out, err = module.run_command(cmd, check_rc=True) - module.exit_json(**json.loads(out)) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/filesystem.py b/ansible_collections/community/general/plugins/modules/system/filesystem.py deleted file mode 100644 index 2245d341..00000000 --- a/ansible_collections/community/general/plugins/modules/system/filesystem.py +++ /dev/null @@ -1,579 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, quidame -# Copyright: (c) 2013, Alexander Bulimov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -author: - - Alexander Bulimov (@abulimov) - - quidame (@quidame) -module: filesystem -short_description: Makes a filesystem -description: - - This module creates a filesystem. -options: - state: - description: - - If C(state=present), the filesystem is created if it doesn't already - exist, that is the default behaviour if I(state) is omitted. - - If C(state=absent), filesystem signatures on I(dev) are wiped if it - contains a filesystem (as known by C(blkid)). - - When C(state=absent), all other options but I(dev) are ignored, and the - module doesn't fail if the device I(dev) doesn't actually exist. - type: str - choices: [ present, absent ] - default: present - version_added: 1.3.0 - fstype: - choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] - description: - - Filesystem type to be created. This option is required with - C(state=present) (or if I(state) is omitted). - - ufs support has been added in community.general 3.4.0. - type: str - aliases: [type] - dev: - description: - - Target path to block device (Linux) or character device (FreeBSD) or - regular file (both). - - When setting Linux-specific filesystem types on FreeBSD, this module - only works when applying to regular files, aka disk images. - - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support - a regular file as their target I(dev). - - Support for character devices on FreeBSD has been added in community.general 3.4.0. - type: path - required: yes - aliases: [device] - force: - description: - - If C(yes), allows to create new filesystem on devices that already has filesystem. - type: bool - default: 'no' - resizefs: - description: - - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. 
- Attempts to resize other filesystem types will fail. - - XFS Will only grow if mounted. Currently, the module is based on commands - from C(util-linux) package to perform operations, so resizing of XFS is - not supported on FreeBSD systems. - - vFAT will likely fail if fatresize < 1.04. - type: bool - default: 'no' - opts: - description: - - List of options to be passed to mkfs command. - type: str -requirements: - - Uses specific tools related to the I(fstype) for creating or resizing a - filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on). - - Uses generic tools mostly related to the Operating System (Linux or - FreeBSD) or available on both, as C(blkid). - - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required. -notes: - - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) - is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also - unable to detect a filesystem), this filesystem is overwritten even if - I(force) is C(no). - - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide - a C(blkid) command that is compatible with this module. However, these - packages conflict with each other, and only the C(util-linux) package - provides the command required to not fail when I(state=absent). - - This module supports I(check_mode). -seealso: - - module: community.general.filesize - - module: ansible.posix.mount -''' - -EXAMPLES = ''' -- name: Create a ext2 filesystem on /dev/sdb1 - community.general.filesystem: - fstype: ext2 - dev: /dev/sdb1 - -- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks - community.general.filesystem: - fstype: ext4 - dev: /dev/sdb1 - opts: -cc - -- name: Blank filesystem signature on /dev/sdb1 - community.general.filesystem: - dev: /dev/sdb1 - state: absent - -- name: Create a filesystem on top of a regular file - community.general.filesystem: - dev: /path/to/disk.img - fstype: vfat -''' - -import os -import platform -import re -import stat - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -class Device(object): - def __init__(self, module, path): - self.module = module - self.path = path - - def size(self): - """ Return size in bytes of device. Returns int """ - statinfo = os.stat(self.path) - if stat.S_ISBLK(statinfo.st_mode): - blockdev_cmd = self.module.get_bin_path("blockdev", required=True) - dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) - devsize_in_bytes = int(out) - elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD': - diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True) - dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True) - devsize_in_bytes = int(out.split()[2]) - elif os.path.isfile(self.path): - devsize_in_bytes = os.path.getsize(self.path) - else: - self.module.fail_json(changed=False, msg="Target device not supported: %s" % self) - - return devsize_in_bytes - - def get_mountpoint(self): - """Return (first) mountpoint of device. 
Returns None when not mounted.""" - cmd_findmnt = self.module.get_bin_path("findmnt", required=True) - - # find mountpoint - rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output", - "TARGET", "--source", self.path], check_rc=False) - if rc != 0: - mountpoint = None - else: - mountpoint = mountpoint.split('\n')[0] - - return mountpoint - - def __str__(self): - return self.path - - -class Filesystem(object): - - MKFS = None - MKFS_FORCE_FLAGS = [] - INFO = None - GROW = None - GROW_MAX_SPACE_FLAGS = [] - GROW_MOUNTPOINT_ONLY = False - - LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} - - def __init__(self, module): - self.module = module - - @property - def fstype(self): - return type(self).__name__ - - def get_fs_size(self, dev): - """Return size in bytes of filesystem on device (integer). - Should query the info with a per-fstype command that can access the - device whenever it is mounted or not, and parse the command output. - Parser must ensure to return an integer, or raise a ValueError. - """ - raise NotImplementedError() - - def create(self, opts, dev): - if self.module.check_mode: - return - - mkfs = self.module.get_bin_path(self.MKFS, required=True) - cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)] - self.module.run_command(cmd, check_rc=True) - - def wipefs(self, dev): - if self.module.check_mode: - return - - # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above) - # that is ported to FreeBSD. The use of dd as a portable fallback is - # not doable here if it needs get_mountpoint() (to prevent corruption of - # a mounted filesystem), since 'findmnt' is not available on FreeBSD, - # even in util-linux port for this OS. - wipefs = self.module.get_bin_path('wipefs', required=True) - cmd = [wipefs, "--all", str(dev)] - self.module.run_command(cmd, check_rc=True) - - def grow_cmd(self, target): - """Build and return the resizefs commandline as list.""" - cmdline = [self.module.get_bin_path(self.GROW, required=True)] - cmdline += self.GROW_MAX_SPACE_FLAGS + [target] - return cmdline - - def grow(self, dev): - """Get dev and fs size and compare. 
Returns stdout of used command.""" - devsize_in_bytes = dev.size() - - try: - fssize_in_bytes = self.get_fs_size(dev) - except NotImplementedError: - self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype) - except ValueError as err: - self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err))) - self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev)) - - if not fssize_in_bytes < devsize_in_bytes: - self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) - elif self.module.check_mode: - self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev)) - - if self.GROW_MOUNTPOINT_ONLY: - mountpoint = dev.get_mountpoint() - if not mountpoint: - self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype)) - grow_target = mountpoint - else: - grow_target = str(dev) - - dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True) - return out - - -class Ext(Filesystem): - MKFS_FORCE_FLAGS = ['-F'] - INFO = 'tune2fs' - GROW = 'resize2fs' - - def get_fs_size(self, dev): - """Get Block count and Block size and return their product.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) - - block_count = block_size = None - for line in out.splitlines(): - if 'Block count:' in line: - block_count = int(line.split(':')[1].strip()) - elif 'Block size:' in line: - block_size = int(line.split(':')[1].strip()) - if None not in (block_size, block_count): - break - else: - raise ValueError(out) - - return block_size * block_count - - -class Ext2(Ext): - MKFS = 'mkfs.ext2' - - -class Ext3(Ext): - MKFS = 'mkfs.ext3' - - -class Ext4(Ext): - MKFS = 'mkfs.ext4' - - -class XFS(Filesystem): - MKFS = 'mkfs.xfs' - MKFS_FORCE_FLAGS = ['-f'] - INFO = 'xfs_info' - GROW = 'xfs_growfs' - GROW_MOUNTPOINT_ONLY = True - - def get_fs_size(self, dev): - """Get bsize and blocks and return their product.""" - cmdline = [self.module.get_bin_path(self.INFO, required=True)] - - # Depending on the versions, xfs_info is able to get info from the - # device, whenever it is mounted or not, or only if unmounted, or - # only if mounted, or not at all. For any version until now, it is - # able to query info from the mountpoint. So try it first, and use - # device as the last resort: it may or may not work. 
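-        # The parser below expects the usual xfs_info layout, for instance:
-        #   data     =                       bsize=4096   blocks=26214400, imaxpct=25
-        # (an illustrative sample; exact spacing varies between versions).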
- mountpoint = dev.get_mountpoint() - if mountpoint: - cmdline += [mountpoint] - else: - cmdline += [str(dev)] - dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV) - - block_size = block_count = None - for line in out.splitlines(): - col = line.split('=') - if col[0].strip() == 'data': - if col[1].strip() == 'bsize': - block_size = int(col[2].split()[0]) - if col[2].split()[1] == 'blocks': - block_count = int(col[3].split(',')[0]) - if None not in (block_size, block_count): - break - else: - raise ValueError(out) - - return block_size * block_count - - -class Reiserfs(Filesystem): - MKFS = 'mkfs.reiserfs' - MKFS_FORCE_FLAGS = ['-q'] - - -class Btrfs(Filesystem): - MKFS = 'mkfs.btrfs' - - def __init__(self, module): - super(Btrfs, self).__init__(module) - mkfs = self.module.get_bin_path(self.MKFS, required=True) - dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True) - match = re.search(r" v([0-9.]+)", stdout) - if not match: - # v0.20-rc1 use stderr - match = re.search(r" v([0-9.]+)", stderr) - if match: - # v0.20-rc1 doesn't have --force parameter added in following version v3.12 - if LooseVersion(match.group(1)) >= LooseVersion('3.12'): - self.MKFS_FORCE_FLAGS = ['-f'] - else: - # assume version is greater or equal to 3.12 - self.MKFS_FORCE_FLAGS = ['-f'] - self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr)) - - -class Ocfs2(Filesystem): - MKFS = 'mkfs.ocfs2' - MKFS_FORCE_FLAGS = ['-Fx'] - - -class F2fs(Filesystem): - MKFS = 'mkfs.f2fs' - INFO = 'dump.f2fs' - GROW = 'resize.f2fs' - - def __init__(self, module): - super(F2fs, self).__init__(module) - mkfs = self.module.get_bin_path(self.MKFS, required=True) - dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV) - # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)" - # mkfs.f2fs displays version since v1.2.0 - match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out) - if match is not None: - # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem - # before that version -f switch wasn't used - if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'): - self.MKFS_FORCE_FLAGS = ['-f'] - - def get_fs_size(self, dev): - """Get sector size and total FS sectors and return their product.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) - sector_size = sector_count = None - for line in out.splitlines(): - if 'Info: sector size = ' in line: - # expected: 'Info: sector size = 512' - sector_size = int(line.split()[4]) - elif 'Info: total FS sectors = ' in line: - # expected: 'Info: total FS sectors = 102400 (50 MB)' - sector_count = int(line.split()[5]) - if None not in (sector_size, sector_count): - break - else: - raise ValueError(out) - - return sector_size * sector_count - - -class VFAT(Filesystem): - INFO = 'fatresize' - GROW = 'fatresize' - GROW_MAX_SPACE_FLAGS = ['-s', 'max'] - - def __init__(self, module): - super(VFAT, self).__init__(module) - if platform.system() == 'FreeBSD': - self.MKFS = 'newfs_msdos' - else: - self.MKFS = 'mkfs.vfat' - - def get_fs_size(self, dev): - """Get and return size of filesystem, in bytes.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) - fssize = None - for line in 
out.splitlines()[1:]: - param, value = line.split(':', 1) - if param.strip() == 'Size': - fssize = int(value.strip()) - break - else: - raise ValueError(out) - - return fssize - - -class LVM(Filesystem): - MKFS = 'pvcreate' - MKFS_FORCE_FLAGS = ['-f'] - INFO = 'pvs' - GROW = 'pvresize' - - def get_fs_size(self, dev): - """Get and return PV size, in bytes.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) - pv_size = int(size) - return pv_size - - -class Swap(Filesystem): - MKFS = 'mkswap' - MKFS_FORCE_FLAGS = ['-f'] - - -class UFS(Filesystem): - MKFS = 'newfs' - INFO = 'dumpfs' - GROW = 'growfs' - GROW_MAX_SPACE_FLAGS = ['-y'] - - def get_fs_size(self, dev): - """Get providersize and fragment size and return their product.""" - cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) - - fragmentsize = providersize = None - for line in out.splitlines(): - if line.startswith('fsize'): - fragmentsize = int(line.split()[1]) - elif 'providersize' in line: - providersize = int(line.split()[-1]) - if None not in (fragmentsize, providersize): - break - else: - raise ValueError(out) - - return fragmentsize * providersize - - -FILESYSTEMS = { - 'ext2': Ext2, - 'ext3': Ext3, - 'ext4': Ext4, - 'ext4dev': Ext4, - 'f2fs': F2fs, - 'reiserfs': Reiserfs, - 'xfs': XFS, - 'btrfs': Btrfs, - 'vfat': VFAT, - 'ocfs2': Ocfs2, - 'LVM2_member': LVM, - 'swap': Swap, - 'ufs': UFS, -} - - -def main(): - friendly_names = { - 'lvm': 'LVM2_member', - } - - fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys()) - - # There is no "single command" to manipulate filesystems, so we map them all out and their options - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - fstype=dict(type='str', aliases=['type'], choices=list(fstypes)), - dev=dict(type='path', required=True, aliases=['device']), - opts=dict(type='str'), - force=dict(type='bool', default=False), - resizefs=dict(type='bool', default=False), - ), - required_if=[ - ('state', 'present', ['fstype']) - ], - supports_check_mode=True, - ) - - state = module.params['state'] - dev = module.params['dev'] - fstype = module.params['fstype'] - opts = module.params['opts'] - force = module.params['force'] - resizefs = module.params['resizefs'] - - mkfs_opts = [] - if opts is not None: - mkfs_opts = opts.split() - - changed = False - - if not os.path.exists(dev): - msg = "Device %s not found." % dev - if state == "present": - module.fail_json(msg=msg) - else: - module.exit_json(msg=msg) - - dev = Device(module, dev) - - # In case blkid/fstyp isn't able to identify an existing filesystem, device - # is considered as empty, then this existing filesystem would be overwritten - # even if force isn't enabled. 
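
The blkid probe the comment above describes can be reproduced on its own. A sketch using the same flags the module passes; the helper is illustrative and the FreeBSD fstyp fallback is omitted:

import os
import subprocess

def detect_fstype(device):
    """Return the filesystem type blkid reports for `device`, or '' if none."""
    # -c /dev/null bypasses the blkid cache; -o value -s TYPE prints just the type.
    proc = subprocess.run(
        ["blkid", "-c", os.devnull, "-o", "value", "-s", "TYPE", device],
        capture_output=True, text=True,
    )
    return proc.stdout.strip()
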
- cmd = module.get_bin_path('blkid', required=True) - rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)]) - fs = raw_fs.strip() - if not fs and platform.system() == 'FreeBSD': - cmd = module.get_bin_path('fstyp', required=True) - rc, raw_fs, err = module.run_command([cmd, str(dev)]) - fs = raw_fs.strip() - - if state == "present": - if fstype in friendly_names: - fstype = friendly_names[fstype] - - try: - klass = FILESYSTEMS[fstype] - except KeyError: - module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype) - - filesystem = klass(module) - - same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype] - if same_fs and not resizefs and not force: - module.exit_json(changed=False) - elif same_fs and resizefs: - if not filesystem.GROW: - module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype) - - out = filesystem.grow(dev) - - module.exit_json(changed=True, msg=out) - elif fs and not force: - module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err) - - # create fs - filesystem.create(mkfs_opts, dev) - changed = True - - elif fs: - # wipe fs signatures - filesystem = Filesystem(module) - filesystem.wipefs(dev) - changed = True - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/gconftool2.py b/ansible_collections/community/general/plugins/modules/system/gconftool2.py deleted file mode 100644 index 6b9ce712..00000000 --- a/ansible_collections/community/general/plugins/modules/system/gconftool2.py +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Kenneth D. Evensen -# Copyright: (c) 2017, Abhijeet Kasurde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: gconftool2 -author: - - Kenneth D. Evensen (@kevensen) -short_description: Edit GNOME Configurations -description: - - This module allows for the manipulation of GNOME 2 Configuration via - gconftool-2. Please see the gconftool-2(1) man pages for more details. -options: - key: - type: str - description: - - A GConf preference key is an element in the GConf repository - that corresponds to an application preference. See man gconftool-2(1) - required: yes - value: - type: str - description: - - Preference keys typically have simple values such as strings, - integers, or lists of strings and integers. This is ignored if the state - is "get". See man gconftool-2(1) - value_type: - type: str - description: - - The type of value being set. This is ignored if the state is "get". - choices: [ bool, float, int, string ] - state: - type: str - description: - - The action to take upon the key/value. - required: yes - choices: [ absent, get, present ] - config_source: - type: str - description: - - Specify a configuration source to use rather than the default path. - See man gconftool-2(1) - direct: - description: - - Access the config database directly, bypassing server. If direct is - specified then the config_source must be specified as well. 
-        See man gconftool-2(1)
-    type: bool
-    default: 'no'
-'''
-
-EXAMPLES = """
-- name: Change the widget font to "Serif 12"
-  community.general.gconftool2:
-    key: "/desktop/gnome/interface/font_name"
-    value_type: "string"
-    value: "Serif 12"
-"""
-
-RETURN = '''
-  key:
-    description: The key specified in the module parameters.
-    returned: success
-    type: str
-    sample: /desktop/gnome/interface/font_name
-  value_type:
-    description: The type of the value that was changed.
-    returned: success
-    type: str
-    sample: string
-  value:
-    description: The value of the preference key after executing the module.
-    returned: success
-    type: str
-    sample: "Serif 12"
-...
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-class GConf2Preference(object):
-    def __init__(self, ansible, key, value_type, value,
-                 direct=False, config_source=""):
-        self.ansible = ansible
-        self.key = key
-        self.value_type = value_type
-        self.value = value
-        self.config_source = config_source
-        self.direct = direct
-
-    def value_already_set(self):
-        # Currently unused stub: change detection happens in main() by
-        # comparing the current value returned by a "get" call.
-        return False
-
-    def call(self, call_type, fail_onerr=True):
-        """Helper function to perform gconftool-2 operations."""
-        config_source = ''
-        direct = ''
-        changed = False
-        out = ''
-
-        # If the configuration source differs from the default, build the
-        # corresponding argument.
-        if self.config_source is not None and len(self.config_source) > 0:
-            config_source = "--config-source " + self.config_source
-
-        # If direct is true, build the argument.
-        if self.direct:
-            direct = "--direct"
-
-        # Execute the call
-        cmd = "gconftool-2 "
-        try:
-            # A "get" call needs fewer parameters, so most can be ignored.
-            if call_type == 'get':
-                cmd += "--get {0}".format(self.key)
-            # Otherwise, use all relevant parameters.
-            elif call_type == 'set':
-                cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct,
-                                                                     config_source,
-                                                                     self.value_type,
-                                                                     call_type,
-                                                                     self.key,
-                                                                     self.value)
-            elif call_type == 'unset':
-                cmd += "--unset {0}".format(self.key)
-
-            # Start external command
-            rc, out, err = self.ansible.run_command(cmd, use_unsafe_shell=True)
-
-            if len(err) > 0:
-                if fail_onerr:
-                    self.ansible.fail_json(msg='gconftool-2 failed with '
-                                               'error: %s' % (str(err)))
-            else:
-                changed = True
-
-        except OSError as exception:
-            self.ansible.fail_json(msg='gconftool-2 failed with exception: '
-                                       '%s' % exception)
-        return changed, out.rstrip()
-
-
-def main():
-    # Set up the Ansible module
-    module = AnsibleModule(
-        argument_spec=dict(
-            key=dict(type='str', required=True, no_log=False),
-            value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
-            value=dict(type='str'),
-            state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
-            direct=dict(type='bool', default=False),
-            config_source=dict(type='str'),
-        ),
-        supports_check_mode=True
-    )
-
-    state_values = {"present": "set", "absent": "unset", "get": "get"}
-
-    # Assign module values to local variables, normalizing boolean-like
-    # strings; guard against None, which is valid when the state is "get".
-    key = module.params['key']
-    value_type = module.params['value_type']
-    value = module.params['value']
-    if value is not None and value.lower() in ("true", "false"):
-        value = value.lower()
-
-    state = state_values[module.params['state']]
-    direct = module.params['direct']
-    config_source = module.params['config_source']
-
-    # Initialize some variables for later
-    change = False
-    new_value = ''
-
-    if state != "get":
-        if value is None or value == "":
-            module.fail_json(msg='State %s requires "value" to be set'
-                             % str(state))
- elif value_type is None or value_type == "": - module.fail_json(msg='State %s requires "value_type" to be set' - % str(state)) - - if direct and config_source is None: - module.fail_json(msg='If "direct" is "yes" then the ' + - '"config_source" must be specified') - elif not direct and config_source is not None: - module.fail_json(msg='If the "config_source" is specified ' + - 'then "direct" must be "yes"') - - # Create a gconf2 preference - gconf_pref = GConf2Preference(module, key, value_type, - value, direct, config_source) - # Now we get the current value, if not found don't fail - dummy, current_value = gconf_pref.call("get", fail_onerr=False) - - # Check if the current value equals the value we want to set. If not, make - # a change - if current_value != value: - # If check mode, we know a change would have occurred. - if module.check_mode: - # So we will set the change to True - change = True - # And set the new_value to the value that would have been set - new_value = value - # If not check mode make the change. - else: - change, new_value = gconf_pref.call(state) - # If the value we want to set is the same as the current_value, we will - # set the new_value to the current_value for reporting - else: - new_value = current_value - - facts = dict(gconftool2={'changed': change, - 'key': key, - 'value_type': value_type, - 'new_value': new_value, - 'previous_value': current_value, - 'playbook_value': module.params['value']}) - - module.exit_json(changed=change, ansible_facts=facts) - - -if __name__ == '__main__': - main() diff --git a/ansible_collections/community/general/plugins/modules/system/homectl.py b/ansible_collections/community/general/plugins/modules/system/homectl.py deleted file mode 100644 index ff7a6195..00000000 --- a/ansible_collections/community/general/plugins/modules/system/homectl.py +++ /dev/null @@ -1,650 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2022, James Livulpi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: homectl -author: - - "James Livulpi (@jameslivulpi)" -short_description: Manage user accounts with systemd-homed -version_added: 4.4.0 -description: - - Manages a user's home directory managed by systemd-homed. -options: - name: - description: - - The user name to create, remove, or update. - required: true - aliases: [ 'user', 'username' ] - type: str - password: - description: - - Set the user's password to this. - - Homed requires this value to be in cleartext on user creation and updating a user. - - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using crypt. - - See U(https://systemd.io/USER_RECORD/). - - This is required for I(state=present). When an existing user is updated this is checked against the stored hash in homed. - type: str - state: - description: - - The operation to take on the user. - choices: [ 'absent', 'present' ] - default: present - type: str - storage: - description: - - Indicates the storage mechanism for the user's home directory. - - If the storage type is not specified, ``homed.conf(5)`` defines which default storage to use. - - Only used when a user is first created. - choices: [ 'classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs' ] - type: str - disksize: - description: - - The intended home directory disk space. 
-      - Human readable value such as C(10G), C(10M), or C(10B).
-    type: str
-  resize:
-    description:
-      - When used with I(disksize) this will attempt to resize the home directory immediately.
-    default: false
-    type: bool
-  realname:
-    description:
-      - The user's real ('human') name.
-      - This can also be used to add a comment to maintain compatibility with C(useradd).
-    aliases: [ 'comment' ]
-    type: str
-  realm:
-    description:
-      - The 'realm' a user is defined in.
-    type: str
-  email:
-    description:
-      - The email address of the user.
-    type: str
-  location:
-    description:
-      - A free-form location string describing the location of the user.
-    type: str
-  iconname:
-    description:
-      - The name of an icon picked by the user, for example for the purpose of an avatar.
-      - Should follow the semantics defined in the Icon Naming Specification.
-      - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics.
-    type: str
-  homedir:
-    description:
-      - Path to use as home directory for the user.
-      - This is the directory the user's home directory is mounted to while the user is logged in.
-      - This is not where the user's data is actually stored; see I(imagepath) for that.
-      - Only used when a user is first created.
-    type: path
-  imagepath:
-    description:
-      - Path to place the user's home directory.
-      - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information.
-      - Only used when a user is first created.
-    type: path
-  uid:
-    description:
-      - Sets the UID of the user.
-      - If I(gid) is also given, homed requires the two values to be the same.
-      - Only used when a user is first created.
-    type: int
-  gid:
-    description:
-      - Sets the GID of the user.
-      - If I(uid) is also given, homed requires the two values to be the same.
-      - Only used when a user is first created.
-    type: int
-  mountopts:
-    description:
-      - Comma-separated string of mount options for the user's home directory.
-      - Valid options are C(nosuid), C(nodev) or C(noexec).
-      - By default homed uses C(nodev) and C(nosuid), while C(noexec) is off.
-    type: str
-  umask:
-    description:
-      - Sets the umask for the user's login sessions.
-      - Value from C(0000) to C(0777).
-    type: int
-  memberof:
-    description:
-      - Comma-separated string of UNIX groups this user shall be a member of.
-    aliases: [ 'groups' ]
-    type: str
-  skeleton:
-    description:
-      - The absolute path to the skeleton directory to populate a new home directory from.
-      - This is only used when a home directory is first created.
-      - If not specified, homed defaults to C(/etc/skel).
-    aliases: [ 'skel' ]
-    type: path
-  shell:
-    description:
-      - Shell binary to use for terminal logins of the given user.
-      - If not specified, homed defaults to C(/bin/bash).
-    type: str
-  environment:
-    description:
-      - Comma-separated string of environment variable assignments to set for
-        the user's login session, in a format compatible with ``putenv()``.
-      - Any environment variable listed here is automatically set by pam_systemd for all
-        login sessions of the user.
-    aliases: [ 'setenv' ]
-    type: str
-  timezone:
-    description:
-      - Preferred timezone to use for the user.
-      - Should be a tzdata-compatible location string such as C(America/New_York).
-    type: str
-  locked:
-    description:
-      - Whether the user account should be locked or not.
-    type: bool
-  language:
-    description:
-      - The preferred language/locale for the user.
-      - This should be in a format compatible with the C($LANG) environment variable.
-    type: str
-  passwordhint:
-    description:
-      - Password hint for the given user.
-    type: str
-  sshkeys:
-    description:
-      - Comma-separated string of SSH public keys that are authorized to access the account.
-      - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_keys) file.
-    type: str
-  notbefore:
-    description:
-      - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in.
-    type: int
-  notafter:
-    description:
-      - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in.
-    type: int
-'''

-EXAMPLES = '''
-- name: Add the user 'james'
-  community.general.homectl:
-    name: james
-    password: myreallysecurepassword1!
-    state: present
-
-- name: Add the user 'alice' with a zsh shell and a uid and gid of 1000
-  community.general.homectl:
-    name: alice
-    password: myreallysecurepassword1!
-    state: present
-    shell: /bin/zsh
-    uid: 1000
-    gid: 1000
-
-- name: Modify an existing user 'frank' to have 10G of disk space and resize it now
-  community.general.homectl:
-    name: frank
-    password: myreallysecurepassword1!
-    state: present
-    disksize: 10G
-    resize: yes
-
-- name: Remove an existing user 'janet'
-  community.general.homectl:
-    name: janet
-    state: absent
-'''
-
-RETURN = '''
-data:
-  description: A JSON dictionary returned from C(homectl inspect -j).
-  returned: success
-  type: dict
-  sample: {
-    "data": {
-      "binding": {
-        "e9ed2a5b0033427286b228e97c1e8343": {
-          "fileSystemType": "btrfs",
-          "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b",
-          "gid": 60268,
-          "imagePath": "/home/james.home",
-          "luksCipher": "aes",
-          "luksCipherMode": "xts-plain64",
-          "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81",
-          "luksVolumeKeySize": 32,
-          "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f",
-          "storage": "luks",
-          "uid": 60268
-        }
-      },
-      "diskSize": 3221225472,
-      "disposition": "regular",
-      "lastChangeUSec": 1641941238208691,
-      "lastPasswordChangeUSec": 1641941238208691,
-      "privileged": {
-        "hashedPassword": [
-          "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV."
- ] - }, - "signature": [ - { - "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==", - "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n" - } - ], - "status": { - "e9ed2a5b0033427286b228e97c1e8343": { - "diskCeiling": 21845405696, - "diskFloor": 268435456, - "diskSize": 3221225472, - "service": "io.systemd.Home", - "signedLocally": true, - "state": "inactive" - } - }, - "userName": "james", - } - } -''' - -import crypt -import json -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.basic import jsonify -from ansible.module_utils.common.text.formatters import human_to_bytes - - -class Homectl(object): - '''#TODO DOC STRINGS''' - - def __init__(self, module): - self.module = module - self.state = module.params['state'] - self.name = module.params['name'] - self.password = module.params['password'] - self.storage = module.params['storage'] - self.disksize = module.params['disksize'] - self.resize = module.params['resize'] - self.realname = module.params['realname'] - self.realm = module.params['realm'] - self.email = module.params['email'] - self.location = module.params['location'] - self.iconname = module.params['iconname'] - self.homedir = module.params['homedir'] - self.imagepath = module.params['imagepath'] - self.uid = module.params['uid'] - self.gid = module.params['gid'] - self.umask = module.params['umask'] - self.memberof = module.params['memberof'] - self.skeleton = module.params['skeleton'] - self.shell = module.params['shell'] - self.environment = module.params['environment'] - self.timezone = module.params['timezone'] - self.locked = module.params['locked'] - self.passwordhint = module.params['passwordhint'] - self.sshkeys = module.params['sshkeys'] - self.language = module.params['language'] - self.notbefore = module.params['notbefore'] - self.notafter = module.params['notafter'] - self.mountopts = module.params['mountopts'] - - self.result = {} - - # Cannot run homectl commands if service is not active - def homed_service_active(self): - is_active = True - cmd = ['systemctl', 'show', 'systemd-homed.service', '-p', 'ActiveState'] - rc, show_service_stdout, stderr = self.module.run_command(cmd) - if rc == 0: - state = show_service_stdout.rsplit('=')[1] - if state.strip() != 'active': - is_active = False - return is_active - - def user_exists(self): - exists = False - valid_pw = False - # Get user properties if they exist in json - rc, stdout, stderr = self.get_user_metadata() - if rc == 0: - exists = True - # User exists now compare password given with current hashed password stored in the user metadata. - if self.state != 'absent': # Don't need checking on remove user - stored_pwhash = json.loads(stdout)['privileged']['hashedPassword'][0] - if self._check_password(stored_pwhash): - valid_pw = True - return exists, valid_pw - - def create_user(self): - record = self.create_json_record(create=True) - cmd = [self.module.get_bin_path('homectl', True)] - cmd.append('create') - cmd.append('--identity=-') # Read the user record from standard input. 
-        return self.module.run_command(cmd, data=record)
-
-    def _hash_password(self, password):
-        method = crypt.METHOD_SHA512
-        salt = crypt.mksalt(method, rounds=10000)
-        pw_hash = crypt.crypt(password, salt)
-        return pw_hash
-
-    def _check_password(self, pwhash):
-        # Hashing the supplied password with the stored hash as salt must
-        # reproduce the stored hash if the password is correct.
-        pw_hash = crypt.crypt(self.password, pwhash)
-        return pwhash == pw_hash
-
-    def remove_user(self):
-        cmd = [self.module.get_bin_path('homectl', True)]
-        cmd.append('remove')
-        cmd.append(self.name)
-        return self.module.run_command(cmd)
-
-    def prepare_modify_user_command(self):
-        record = self.create_json_record()
-        cmd = [self.module.get_bin_path('homectl', True)]
-        cmd.append('update')
-        cmd.append(self.name)
-        cmd.append('--identity=-')  # Read the user record from standard input.
-        # Resize the home directory immediately when resize=true. This flag
-        # is not part of the JSON user record and has to be passed on the
-        # command line.
-        if self.disksize and self.resize:
-            cmd.append('--and-resize')
-            cmd.append('true')
-            self.result['changed'] = True
-        return cmd, record
-
-    def get_user_metadata(self):
-        cmd = [self.module.get_bin_path('homectl', True)]
-        cmd.append('inspect')
-        cmd.append(self.name)
-        cmd.append('-j')
-        cmd.append('--no-pager')
-        rc, stdout, stderr = self.module.run_command(cmd)
-        return rc, stdout, stderr
-
-    # Build up the dictionary to jsonify for homectl commands.
-    def create_json_record(self, create=False):
-        record = {}
-        user_metadata = {}
-        self.result['changed'] = False
-        # Get the current user record if not creating a new user record.
-        if not create:
-            rc, user_metadata, stderr = self.get_user_metadata()
-            user_metadata = json.loads(user_metadata)
-            # Remove elements that are not meant to be updated from the record.
-            # These are always part of the record when a user exists.
-            user_metadata.pop('signature', None)
-            user_metadata.pop('binding', None)
-            user_metadata.pop('status', None)
-            # Let lastChangeUSec be updated by homed when the command runs.
-            user_metadata.pop('lastChangeUSec', None)
-            # Only change the fields that were requested, leaving what is
-            # currently in the record intact.
-            record = user_metadata
-
-        record['userName'] = self.name
-        record['secret'] = {'password': [self.password]}
-
-        if create:
-            password_hash = self._hash_password(self.password)
-            record['privileged'] = {'hashedPassword': [password_hash]}
-            self.result['changed'] = True
-
-        if self.uid and self.gid and create:
-            record['uid'] = self.uid
-            record['gid'] = self.gid
-            self.result['changed'] = True
-
-        if self.memberof:
-            member_list = list(self.memberof.split(','))
-            if member_list != record.get('memberOf', [None]):
-                record['memberOf'] = member_list
-                self.result['changed'] = True
-
-        if self.realname:
-            if self.realname != record.get('realName'):
-                record['realName'] = self.realname
-                self.result['changed'] = True
-
-        # Cannot update storage unless we're creating a new user.
-        # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
-        if self.storage and create:
-            record['storage'] = self.storage
-            self.result['changed'] = True
-
-        # Cannot update homedir unless we're creating a new user.
-        # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
-        if self.homedir and create:
-            record['homeDirectory'] = self.homedir
-            self.result['changed'] = True
-
-        # Cannot update imagepath unless we're creating a new user.
-        # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
-        if self.imagepath and create:
-            record['imagePath'] = self.imagepath
-            self.result['changed'] = True
-
-        if self.disksize:
-            # Convert the human-readable size to bytes before comparing.
-            if human_to_bytes(self.disksize) != record.get('diskSize'):
-                record['diskSize'] = human_to_bytes(self.disksize)
-                self.result['changed'] = True
-
-        if self.realm:
-            if self.realm != record.get('realm'):
-                record['realm'] = self.realm
-                self.result['changed'] = True
-
-        if self.email:
-            if self.email != record.get('emailAddress'):
-                record['emailAddress'] = self.email
-                self.result['changed'] = True
-
-        if self.location:
-            if self.location != record.get('location'):
-                record['location'] = self.location
-                self.result['changed'] = True
-
-        if self.iconname:
-            if self.iconname != record.get('iconName'):
-                record['iconName'] = self.iconname
-                self.result['changed'] = True
-
-        if self.skeleton:
-            if self.skeleton != record.get('skeletonDirectory'):
-                record['skeletonDirectory'] = self.skeleton
-                self.result['changed'] = True
-
-        if self.shell:
-            if self.shell != record.get('shell'):
-                record['shell'] = self.shell
-                self.result['changed'] = True
-
-        # 0 (i.e. 0000) is a valid umask, so do not test for truthiness.
-        if self.umask is not None:
-            if self.umask != record.get('umask'):
-                record['umask'] = self.umask
-                self.result['changed'] = True
-
-        if self.environment:
-            # Compare as a list, the same shape the record stores.
-            env_list = list(self.environment.split(','))
-            if env_list != record.get('environment', [None]):
-                record['environment'] = env_list
-                self.result['changed'] = True
-
-        if self.timezone:
-            if self.timezone != record.get('timeZone'):
-                record['timeZone'] = self.timezone
-                self.result['changed'] = True
-
-        # False is meaningful here (unlock the account), so do not test for
-        # truthiness.
-        if self.locked is not None:
-            if self.locked != record.get('locked'):
-                record['locked'] = self.locked
-                self.result['changed'] = True
-
-        if self.passwordhint:
-            if self.passwordhint != record.get('privileged', {}).get('passwordHint'):
-                record['privileged']['passwordHint'] = self.passwordhint
-                self.result['changed'] = True
-
-        if self.sshkeys:
-            # Compare as a list, the same shape the record stores.
-            key_list = list(self.sshkeys.split(','))
-            if key_list != record.get('privileged', {}).get('sshAuthorizedKeys'):
-                record['privileged']['sshAuthorizedKeys'] = key_list
-                self.result['changed'] = True
-
-        if self.language:
-            if self.language != record.get('preferredLanguage'):
-                record['preferredLanguage'] = self.language
-                self.result['changed'] = True
-
-        if self.notbefore:
-            if self.notbefore != record.get('notBeforeUSec'):
-                record['notBeforeUSec'] = self.notbefore
-                self.result['changed'] = True
-
-        if self.notafter:
-            if self.notafter != record.get('notAfterUSec'):
-                record['notAfterUSec'] = self.notafter
-                self.result['changed'] = True
-
-        if self.mountopts:
-            opts = list(self.mountopts.split(','))
-            if 'nosuid' in opts:
-                if record.get('mountNoSuid') is not True:
-                    record['mountNoSuid'] = True
-                    self.result['changed'] = True
-            else:
-                if record.get('mountNoSuid') is not False:
-                    record['mountNoSuid'] = False
-                    self.result['changed'] = True
-
-            if 'nodev' in opts:
-                if record.get('mountNoDevices') is not True:
-                    record['mountNoDevices'] = True
-                    self.result['changed'] = True
-            else:
-                if record.get('mountNoDevices') is not False:
-                    record['mountNoDevices'] = False
-                    self.result['changed'] = True
-
-            if 'noexec' in opts:
-                if record.get('mountNoExecute') is not True:
-                    record['mountNoExecute'] = True
-                    self.result['changed'] = True
-            else:
-                if record.get('mountNoExecute') is not False:
-                    record['mountNoExecute'] = False
-                    self.result['changed'] = True
-
-        return jsonify(record)
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(type='str', default='present',
choices=['absent', 'present']), - name=dict(type='str', required=True, aliases=['user', 'username']), - password=dict(type='str', no_log=True), - storage=dict(type='str', choices=['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs']), - disksize=dict(type='str'), - resize=dict(type='bool', default=False), - realname=dict(type='str', aliases=['comment']), - realm=dict(type='str'), - email=dict(type='str'), - location=dict(type='str'), - iconname=dict(type='str'), - homedir=dict(type='path'), - imagepath=dict(type='path'), - uid=dict(type='int'), - gid=dict(type='int'), - umask=dict(type='int'), - environment=dict(type='str', aliases=['setenv']), - timezone=dict(type='str'), - memberof=dict(type='str', aliases=['groups']), - skeleton=dict(type='path', aliases=['skel']), - shell=dict(type='str'), - locked=dict(type='bool'), - passwordhint=dict(type='str', no_log=True), - sshkeys=dict(type='str', no_log=True), - language=dict(type='str'), - notbefore=dict(type='int'), - notafter=dict(type='int'), - mountopts=dict(type='str'), - ), - supports_check_mode=True, - - required_if=[ - ('state', 'present', ['password']), - ('resize', True, ['disksize']), - ] - ) - - homectl = Homectl(module) - homectl.result['state'] = homectl.state - - # First we need to make sure homed service is active - if not homectl.homed_service_active(): - module.fail_json(msg='systemd-homed.service is not active') - - # handle removing user - if homectl.state == 'absent': - user_exists, valid_pwhash = homectl.user_exists() - if user_exists: - if module.check_mode: - module.exit_json(changed=True) - rc, stdout, stderr = homectl.remove_user() - if rc != 0: - module.fail_json(name=homectl.name, msg=stderr, rc=rc) - homectl.result['changed'] = True - homectl.result['rc'] = rc - homectl.result['msg'] = 'User %s removed!' % homectl.name - else: - homectl.result['changed'] = False - homectl.result['msg'] = 'User does not exist!' - - # Handle adding a user - if homectl.state == 'present': - user_exists, valid_pwhash = homectl.user_exists() - if not user_exists: - if module.check_mode: - module.exit_json(changed=True) - rc, stdout, stderr = homectl.create_user() - if rc != 0: - module.fail_json(name=homectl.name, msg=stderr, rc=rc) - rc, user_metadata, stderr = homectl.get_user_metadata() - homectl.result['data'] = json.loads(user_metadata) - homectl.result['rc'] = rc - homectl.result['msg'] = 'User %s created!' % homectl.name - else: - if valid_pwhash: - # Run this to see if changed would be True or False which is useful for check_mode - cmd, record = homectl.prepare_modify_user_command() - else: - # User gave wrong password fail with message - homectl.result['changed'] = False - homectl.result['msg'] = 'User exists but password is incorrect!' - module.fail_json(**homectl.result) - - if module.check_mode: - module.exit_json(**homectl.result) - - # Now actually modify the user if changed was set to true at any point. 
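
As the update path above shows, a homectl modification is a JSON user record piped to stdin. Outside the module, the same round trip looks roughly like this; the helper and the `changes` dict are illustrative, while `homectl update <name> --identity=-` is the invocation the module itself builds:

import json
import subprocess

def homectl_update(name, password, changes):
    """Merge `changes` into a minimal user record and feed it to `homectl update`."""
    record = dict(changes)
    record["userName"] = name
    record["secret"] = {"password": [password]}  # homed expects the cleartext here
    subprocess.run(
        ["homectl", "update", name, "--identity=-"],  # read the record from stdin
        input=json.dumps(record), text=True, check=True,
    )
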
-        if homectl.result['changed']:
-            rc, stdout, stderr = module.run_command(cmd, data=record)
-            if rc != 0:
-                module.fail_json(name=homectl.name, msg=stderr, rc=rc, changed=False)
-            rc, user_metadata, stderr = homectl.get_user_metadata()
-            homectl.result['data'] = json.loads(user_metadata)
-            homectl.result['rc'] = rc
-            homectl.result['msg'] = 'User %s modified' % homectl.name
-
-    module.exit_json(**homectl.result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible_collections/community/general/plugins/modules/system/interfaces_file.py b/ansible_collections/community/general/plugins/modules/system/interfaces_file.py
deleted file mode 100644
index 91cf74b4..00000000
--- a/ansible_collections/community/general/plugins/modules/system/interfaces_file.py
+++ /dev/null
@@ -1,372 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2016, Roman Belyakovsky
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: interfaces_file
-short_description: Tweak settings in /etc/network/interfaces files
-extends_documentation_fragment: files
-description:
-     - Manage (add, remove, change) individual interface options in an interfaces-style file without having
-       to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). The interface has to be present in the file.
-     - Read information about interfaces from interfaces-style files.
-options:
-  dest:
-    type: path
-    description:
-      - Path to the interfaces file.
-    default: /etc/network/interfaces
-  iface:
-    type: str
-    description:
-      - Name of the interface, required for value changes or option removal.
-  address_family:
-    type: str
-    description:
-      - Address family of the interface, useful if the same interface name is used for both inet and inet6.
-  option:
-    type: str
-    description:
-      - Name of the option, required for value changes or option removal.
-  value:
-    type: str
-    description:
-      - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
-        If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
-        C(pre-up), C(up), C(post-up) and C(down) options cannot be updated; only adding new options, removing
-        existing ones or clearing the whole option set are supported.
-  backup:
-    description:
-      - Create a backup file including the timestamp information so you can get
-        the original file back if you somehow clobbered it incorrectly.
-    type: bool
-    default: 'no'
-  state:
-    type: str
-    description:
-      - If set to C(absent), the option or section will be removed if present, instead of created.
- default: "present" - choices: [ "present", "absent" ] - -notes: - - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state -requirements: [] -author: "Roman Belyakovsky (@hryamzik)" -''' - -RETURN = ''' -dest: - description: destination file/path - returned: success - type: str - sample: "/etc/network/interfaces" -ifaces: - description: interfaces dictionary - returned: success - type: complex - contains: - ifaces: - description: interface dictionary - returned: success - type: dict - contains: - eth0: - description: Name of the interface - returned: success - type: dict - contains: - address_family: - description: interface address family - returned: success - type: str - sample: "inet" - method: - description: interface method - returned: success - type: str - sample: "manual" - mtu: - description: other options, all values returned as strings - returned: success - type: str - sample: "1500" - pre-up: - description: list of C(pre-up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - up: - description: list of C(up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - post-up: - description: list of C(post-up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - down: - description: list of C(down) scripts - returned: success - type: list - sample: - - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" -... -''' - -EXAMPLES = ''' -- name: Set eth1 mtu configuration value to 8000 - community.general.interfaces_file: - dest: /etc/network/interfaces.d/eth1.cfg - iface: eth1 - option: mtu - value: 8000 - backup: yes - state: present - register: eth1_cfg -''' - -import os -import re -import tempfile - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes - - -def line_dict(line): - return {'line': line, 'line_type': 'unknown'} - - -def make_option_dict(line, iface, option, value, address_family): - return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} - - -def get_option_value(line): - patt = re.compile(r'^\s+(?P